function_name (string, 1–63 chars) | docstring (string, 50–5.89k chars) | masked_code (string, 50–882k chars) | implementation (string, 169–12.9k chars) | start_line (int32, 1–14.6k) | end_line (int32, 16–14.6k) | file_content (string, 274–882k chars) |
---|---|---|---|---|---|---|
generate_bubblesort
|
Generates bubblesort data with the given string prefix (i.e. 'train', 'test') and the specified
number of examples.
:param prefix: String prefix for saving the file ('train', 'test')
:param num_examples: Number of examples to generate.
|
"""
generate_data.py
Core script for generating training/test bubblesort data. First, generates random arrays of numbers,
then steps through an execution trace, computing the exact order of subroutines that need to be
called.
"""
import pickle
import numpy as np
from tasks.bubblesort.env.trace import Trace
# MASKED: generate_bubblesort function (lines 15-34)
|
def generate_bubblesort(prefix, num_examples, debug=False, maximum=10000000000, debug_every=1000):
"""
    Generates bubblesort data with the given string prefix (i.e. 'train', 'test') and the specified
    number of examples.
:param prefix: String prefix for saving the file ('train', 'test')
:param num_examples: Number of examples to generate.
"""
data = []
for i in range(num_examples):
array = np.random.randint(10, size=5)
if debug and i % debug_every == 0:
traces = Trace(array, True).traces
else:
traces = Trace(array).traces
data.append((array, traces))
# print(data)
with open('tasks/bubblesort/data/{}.pik'.format(prefix), 'wb') as f:
pickle.dump(data, f)
| 15 | 34 |
"""
generate_data.py
Core script for generating training/test bubblesort data. First, generates random arrays of numbers,
then steps through an execution trace, computing the exact order of subroutines that need to be
called.
"""
import pickle
import numpy as np
from tasks.bubblesort.env.trace import Trace
def generate_bubblesort(prefix, num_examples, debug=False, maximum=10000000000, debug_every=1000):
"""
    Generates bubblesort data with the given string prefix (i.e. 'train', 'test') and the specified
    number of examples.
:param prefix: String prefix for saving the file ('train', 'test')
:param num_examples: Number of examples to generate.
"""
data = []
for i in range(num_examples):
array = np.random.randint(10, size=5)
if debug and i % debug_every == 0:
traces = Trace(array, True).traces
else:
traces = Trace(array).traces
data.append((array, traces))
# print(data)
with open('tasks/bubblesort/data/{}.pik'.format(prefix), 'wb') as f:
pickle.dump(data, f)
|
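A minimal sketch of how the pickled output of generate_bubblesort above could be read back, assuming the script has already been run from the repository root; the file path and the (array, traces) tuple layout come from the implementation, while the loop and variable names are illustrative only.
import pickle

# Hypothetical consumer of the file written by generate_bubblesort('train', ...).
with open('tasks/bubblesort/data/train.pik', 'rb') as f:
    data = pickle.load(f)  # list of (array, traces) tuples

for array, traces in data[:3]:
    # Each entry pairs the unsorted input array with its recorded execution trace.
    print(array, traces)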
get_age
|
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it won't work.
|
"""Helper methods to handle the time in Home Assistant."""
import datetime as dt
import re
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import pytz
import pytz.exceptions as pytzexceptions
import pytz.tzinfo as pytzinfo
from homeassistant.const import MATCH_ALL
DATE_STR_FORMAT = "%Y-%m-%d"
UTC = pytz.utc
DEFAULT_TIME_ZONE: dt.tzinfo = pytz.utc
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
DATETIME_RE = re.compile(
r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
r"[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
)
def set_default_time_zone(time_zone: dt.tzinfo) -> None:
"""Set a default time zone to be used when none is specified.
Async friendly.
"""
global DEFAULT_TIME_ZONE
# NOTE: Remove in the future in favour of typing
assert isinstance(time_zone, dt.tzinfo)
DEFAULT_TIME_ZONE = time_zone
def get_time_zone(time_zone_str: str) -> Optional[dt.tzinfo]:
"""Get time zone from string. Return None if unable to determine.
Async friendly.
"""
try:
return pytz.timezone(time_zone_str)
except pytzexceptions.UnknownTimeZoneError:
return None
def utcnow() -> dt.datetime:
"""Get now in UTC time."""
return dt.datetime.now(UTC)
def now(time_zone: Optional[dt.tzinfo] = None) -> dt.datetime:
"""Get now in specified time zone."""
return dt.datetime.now(time_zone or DEFAULT_TIME_ZONE)
def as_utc(dattim: dt.datetime) -> dt.datetime:
"""Return a datetime as UTC time.
Assumes datetime without tzinfo to be in the DEFAULT_TIME_ZONE.
"""
if dattim.tzinfo == UTC:
return dattim
if dattim.tzinfo is None:
dattim = DEFAULT_TIME_ZONE.localize(dattim) # type: ignore
return dattim.astimezone(UTC)
def as_timestamp(dt_value: dt.datetime) -> float:
"""Convert a date/time into a unix time (seconds since 1970)."""
if hasattr(dt_value, "timestamp"):
parsed_dt: Optional[dt.datetime] = dt_value
else:
parsed_dt = parse_datetime(str(dt_value))
if parsed_dt is None:
raise ValueError("not a valid date/time.")
return parsed_dt.timestamp()
def as_local(dattim: dt.datetime) -> dt.datetime:
"""Convert a UTC datetime object to local time zone."""
if dattim.tzinfo == DEFAULT_TIME_ZONE:
return dattim
if dattim.tzinfo is None:
dattim = UTC.localize(dattim)
return dattim.astimezone(DEFAULT_TIME_ZONE)
def utc_from_timestamp(timestamp: float) -> dt.datetime:
"""Return a UTC time from a timestamp."""
return UTC.localize(dt.datetime.utcfromtimestamp(timestamp))
def start_of_local_day(
dt_or_d: Union[dt.date, dt.datetime, None] = None
) -> dt.datetime:
"""Return local datetime object of start of day from date or datetime."""
if dt_or_d is None:
date: dt.date = now().date()
    elif isinstance(dt_or_d, dt.datetime):
        date = dt_or_d.date()
    else:
        date = dt_or_d
return DEFAULT_TIME_ZONE.localize( # type: ignore
dt.datetime.combine(date, dt.time())
)
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
def parse_datetime(dt_str: str) -> Optional[dt.datetime]:
"""Parse a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
"""
match = DATETIME_RE.match(dt_str)
if not match:
return None
kws: Dict[str, Any] = match.groupdict()
if kws["microsecond"]:
kws["microsecond"] = kws["microsecond"].ljust(6, "0")
tzinfo_str = kws.pop("tzinfo")
tzinfo: Optional[dt.tzinfo] = None
if tzinfo_str == "Z":
tzinfo = UTC
elif tzinfo_str is not None:
offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
offset_hours = int(tzinfo_str[1:3])
offset = dt.timedelta(hours=offset_hours, minutes=offset_mins)
if tzinfo_str[0] == "-":
offset = -offset
tzinfo = dt.timezone(offset)
kws = {k: int(v) for k, v in kws.items() if v is not None}
kws["tzinfo"] = tzinfo
return dt.datetime(**kws)
def parse_date(dt_str: str) -> Optional[dt.date]:
"""Convert a date string to a date object."""
try:
return dt.datetime.strptime(dt_str, DATE_STR_FORMAT).date()
except ValueError: # If dt_str did not match our format
return None
def parse_time(time_str: str) -> Optional[dt.time]:
"""Parse a time string (00:20:00) into Time object.
Return None if invalid.
"""
parts = str(time_str).split(":")
if len(parts) < 2:
return None
try:
hour = int(parts[0])
minute = int(parts[1])
second = int(parts[2]) if len(parts) > 2 else 0
return dt.time(hour, minute, second)
except ValueError:
# ValueError if value cannot be converted to an int or not in range
return None
# Found in this gist: https://gist.github.com/zhangsen/1199964
# MASKED: get_age function (lines 175-217)
def parse_time_expression(parameter: Any, min_value: int, max_value: int) -> List[int]:
"""Parse the time expression part and return a list of times to match."""
if parameter is None or parameter == MATCH_ALL:
res = list(range(min_value, max_value + 1))
elif isinstance(parameter, str) and parameter.startswith("/"):
parameter = int(parameter[1:])
res = [x for x in range(min_value, max_value + 1) if x % parameter == 0]
elif not hasattr(parameter, "__iter__"):
res = [int(parameter)]
else:
res = list(sorted(int(x) for x in parameter))
for val in res:
if val < min_value or val > max_value:
raise ValueError(
"Time expression '{}': parameter {} out of range ({} to {})"
"".format(parameter, val, min_value, max_value)
)
return res
# pylint: disable=redefined-outer-name
def find_next_time_expression_time(
now: dt.datetime, seconds: List[int], minutes: List[int], hours: List[int]
) -> dt.datetime:
"""Find the next datetime from now for which the time expression matches.
The algorithm looks at each time unit separately and tries to find the
next one that matches for each. If any of them would roll over, all
time units below that are reset to the first matching value.
Timezones are also handled (the tzinfo of the now object is used),
including daylight saving time.
"""
if not seconds or not minutes or not hours:
raise ValueError("Cannot find a next time: Time expression never " "matches!")
def _lower_bound(arr: List[int], cmp: int) -> Optional[int]:
"""Return the first value in arr greater or equal to cmp.
Return None if no such value exists.
"""
left = 0
right = len(arr)
while left < right:
mid = (left + right) // 2
if arr[mid] < cmp:
left = mid + 1
else:
right = mid
if left == len(arr):
return None
return arr[left]
result = now.replace(microsecond=0)
# Match next second
next_second = _lower_bound(seconds, result.second)
if next_second is None:
# No second to match in this minute. Roll-over to next minute.
next_second = seconds[0]
result += dt.timedelta(minutes=1)
result = result.replace(second=next_second)
# Match next minute
next_minute = _lower_bound(minutes, result.minute)
if next_minute != result.minute:
# We're in the next minute. Seconds needs to be reset.
result = result.replace(second=seconds[0])
if next_minute is None:
# No minute to match in this hour. Roll-over to next hour.
next_minute = minutes[0]
result += dt.timedelta(hours=1)
result = result.replace(minute=next_minute)
# Match next hour
next_hour = _lower_bound(hours, result.hour)
if next_hour != result.hour:
# We're in the next hour. Seconds+minutes needs to be reset.
result = result.replace(second=seconds[0], minute=minutes[0])
if next_hour is None:
        # No hour to match in this day. Roll-over to next day.
next_hour = hours[0]
result += dt.timedelta(days=1)
result = result.replace(hour=next_hour)
if result.tzinfo is None:
return result
# Now we need to handle timezones. We will make this datetime object
# "naive" first and then re-convert it to the target timezone.
# This is so that we can call pytz's localize and handle DST changes.
tzinfo: pytzinfo.DstTzInfo = result.tzinfo
result = result.replace(tzinfo=None)
try:
result = tzinfo.localize(result, is_dst=None)
except pytzexceptions.AmbiguousTimeError:
# This happens when we're leaving daylight saving time and local
# clocks are rolled back. In this case, we want to trigger
# on both the DST and non-DST time. So when "now" is in the DST
# use the DST-on time, and if not, use the DST-off time.
use_dst = bool(now.dst())
result = tzinfo.localize(result, is_dst=use_dst)
except pytzexceptions.NonExistentTimeError:
# This happens when we're entering daylight saving time and local
# clocks are rolled forward, thus there are local times that do
# not exist. In this case, we want to trigger on the next time
# that *does* exist.
# In the worst case, this will run through all the seconds in the
# time shift, but that's max 3600 operations for once per year
result = result.replace(tzinfo=tzinfo) + dt.timedelta(seconds=1)
return find_next_time_expression_time(result, seconds, minutes, hours)
result_dst = cast(dt.timedelta, result.dst())
now_dst = cast(dt.timedelta, now.dst())
if result_dst >= now_dst:
return result
# Another edge-case when leaving DST:
# When now is in DST and ambiguous *and* the next trigger time we *should*
# trigger is ambiguous and outside DST, the excepts above won't catch it.
# For example: if triggering on 2:30 and now is 28.10.2018 2:30 (in DST)
# we should trigger next on 28.10.2018 2:30 (out of DST), but our
# algorithm above would produce 29.10.2018 2:30 (out of DST)
# Step 1: Check if now is ambiguous
try:
tzinfo.localize(now.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
# Step 2: Check if result of (now - DST) is ambiguous.
check = now - now_dst
check_result = find_next_time_expression_time(check, seconds, minutes, hours)
try:
tzinfo.localize(check_result.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
# OK, edge case does apply. We must override the DST to DST-off
check_result = tzinfo.localize(check_result.replace(tzinfo=None), is_dst=False)
return check_result
|
def get_age(date: dt.datetime) -> str:
"""
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it won't work.
"""
def formatn(number: int, unit: str) -> str:
"""Add "unit" if it's plural."""
if number == 1:
return f"1 {unit}"
return f"{number:d} {unit}s"
def q_n_r(first: int, second: int) -> Tuple[int, int]:
"""Return quotient and remaining."""
return first // second, first % second
delta = now() - date
day = delta.days
second = delta.seconds
year, day = q_n_r(day, 365)
if year > 0:
return formatn(year, "year")
month, day = q_n_r(day, 30)
if month > 0:
return formatn(month, "month")
if day > 0:
return formatn(day, "day")
hour, second = q_n_r(second, 3600)
if hour > 0:
return formatn(hour, "hour")
minute, second = q_n_r(second, 60)
if minute > 0:
return formatn(minute, "minute")
return formatn(second, "second")
| 175 | 217 |
"""Helper methods to handle the time in Home Assistant."""
import datetime as dt
import re
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import pytz
import pytz.exceptions as pytzexceptions
import pytz.tzinfo as pytzinfo
from homeassistant.const import MATCH_ALL
DATE_STR_FORMAT = "%Y-%m-%d"
UTC = pytz.utc
DEFAULT_TIME_ZONE: dt.tzinfo = pytz.utc
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
DATETIME_RE = re.compile(
r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
r"[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
)
def set_default_time_zone(time_zone: dt.tzinfo) -> None:
"""Set a default time zone to be used when none is specified.
Async friendly.
"""
global DEFAULT_TIME_ZONE
# NOTE: Remove in the future in favour of typing
assert isinstance(time_zone, dt.tzinfo)
DEFAULT_TIME_ZONE = time_zone
def get_time_zone(time_zone_str: str) -> Optional[dt.tzinfo]:
"""Get time zone from string. Return None if unable to determine.
Async friendly.
"""
try:
return pytz.timezone(time_zone_str)
except pytzexceptions.UnknownTimeZoneError:
return None
def utcnow() -> dt.datetime:
"""Get now in UTC time."""
return dt.datetime.now(UTC)
def now(time_zone: Optional[dt.tzinfo] = None) -> dt.datetime:
"""Get now in specified time zone."""
return dt.datetime.now(time_zone or DEFAULT_TIME_ZONE)
def as_utc(dattim: dt.datetime) -> dt.datetime:
"""Return a datetime as UTC time.
Assumes datetime without tzinfo to be in the DEFAULT_TIME_ZONE.
"""
if dattim.tzinfo == UTC:
return dattim
if dattim.tzinfo is None:
dattim = DEFAULT_TIME_ZONE.localize(dattim) # type: ignore
return dattim.astimezone(UTC)
def as_timestamp(dt_value: dt.datetime) -> float:
"""Convert a date/time into a unix time (seconds since 1970)."""
if hasattr(dt_value, "timestamp"):
parsed_dt: Optional[dt.datetime] = dt_value
else:
parsed_dt = parse_datetime(str(dt_value))
if parsed_dt is None:
raise ValueError("not a valid date/time.")
return parsed_dt.timestamp()
def as_local(dattim: dt.datetime) -> dt.datetime:
"""Convert a UTC datetime object to local time zone."""
if dattim.tzinfo == DEFAULT_TIME_ZONE:
return dattim
if dattim.tzinfo is None:
dattim = UTC.localize(dattim)
return dattim.astimezone(DEFAULT_TIME_ZONE)
def utc_from_timestamp(timestamp: float) -> dt.datetime:
"""Return a UTC time from a timestamp."""
return UTC.localize(dt.datetime.utcfromtimestamp(timestamp))
def start_of_local_day(
dt_or_d: Union[dt.date, dt.datetime, None] = None
) -> dt.datetime:
"""Return local datetime object of start of day from date or datetime."""
if dt_or_d is None:
date: dt.date = now().date()
    elif isinstance(dt_or_d, dt.datetime):
        date = dt_or_d.date()
    else:
        date = dt_or_d
return DEFAULT_TIME_ZONE.localize( # type: ignore
dt.datetime.combine(date, dt.time())
)
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
def parse_datetime(dt_str: str) -> Optional[dt.datetime]:
"""Parse a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
"""
match = DATETIME_RE.match(dt_str)
if not match:
return None
kws: Dict[str, Any] = match.groupdict()
if kws["microsecond"]:
kws["microsecond"] = kws["microsecond"].ljust(6, "0")
tzinfo_str = kws.pop("tzinfo")
tzinfo: Optional[dt.tzinfo] = None
if tzinfo_str == "Z":
tzinfo = UTC
elif tzinfo_str is not None:
offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
offset_hours = int(tzinfo_str[1:3])
offset = dt.timedelta(hours=offset_hours, minutes=offset_mins)
if tzinfo_str[0] == "-":
offset = -offset
tzinfo = dt.timezone(offset)
kws = {k: int(v) for k, v in kws.items() if v is not None}
kws["tzinfo"] = tzinfo
return dt.datetime(**kws)
def parse_date(dt_str: str) -> Optional[dt.date]:
"""Convert a date string to a date object."""
try:
return dt.datetime.strptime(dt_str, DATE_STR_FORMAT).date()
except ValueError: # If dt_str did not match our format
return None
def parse_time(time_str: str) -> Optional[dt.time]:
"""Parse a time string (00:20:00) into Time object.
Return None if invalid.
"""
parts = str(time_str).split(":")
if len(parts) < 2:
return None
try:
hour = int(parts[0])
minute = int(parts[1])
second = int(parts[2]) if len(parts) > 2 else 0
return dt.time(hour, minute, second)
except ValueError:
# ValueError if value cannot be converted to an int or not in range
return None
# Found in this gist: https://gist.github.com/zhangsen/1199964
def get_age(date: dt.datetime) -> str:
"""
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it won't work.
"""
def formatn(number: int, unit: str) -> str:
"""Add "unit" if it's plural."""
if number == 1:
return f"1 {unit}"
return f"{number:d} {unit}s"
def q_n_r(first: int, second: int) -> Tuple[int, int]:
"""Return quotient and remaining."""
return first // second, first % second
delta = now() - date
day = delta.days
second = delta.seconds
year, day = q_n_r(day, 365)
if year > 0:
return formatn(year, "year")
month, day = q_n_r(day, 30)
if month > 0:
return formatn(month, "month")
if day > 0:
return formatn(day, "day")
hour, second = q_n_r(second, 3600)
if hour > 0:
return formatn(hour, "hour")
minute, second = q_n_r(second, 60)
if minute > 0:
return formatn(minute, "minute")
return formatn(second, "second")
def parse_time_expression(parameter: Any, min_value: int, max_value: int) -> List[int]:
"""Parse the time expression part and return a list of times to match."""
if parameter is None or parameter == MATCH_ALL:
res = list(range(min_value, max_value + 1))
elif isinstance(parameter, str) and parameter.startswith("/"):
parameter = int(parameter[1:])
res = [x for x in range(min_value, max_value + 1) if x % parameter == 0]
elif not hasattr(parameter, "__iter__"):
res = [int(parameter)]
else:
res = list(sorted(int(x) for x in parameter))
for val in res:
if val < min_value or val > max_value:
raise ValueError(
"Time expression '{}': parameter {} out of range ({} to {})"
"".format(parameter, val, min_value, max_value)
)
return res
# pylint: disable=redefined-outer-name
def find_next_time_expression_time(
now: dt.datetime, seconds: List[int], minutes: List[int], hours: List[int]
) -> dt.datetime:
"""Find the next datetime from now for which the time expression matches.
The algorithm looks at each time unit separately and tries to find the
next one that matches for each. If any of them would roll over, all
time units below that are reset to the first matching value.
Timezones are also handled (the tzinfo of the now object is used),
including daylight saving time.
"""
if not seconds or not minutes or not hours:
raise ValueError("Cannot find a next time: Time expression never " "matches!")
def _lower_bound(arr: List[int], cmp: int) -> Optional[int]:
"""Return the first value in arr greater or equal to cmp.
Return None if no such value exists.
"""
left = 0
right = len(arr)
while left < right:
mid = (left + right) // 2
if arr[mid] < cmp:
left = mid + 1
else:
right = mid
if left == len(arr):
return None
return arr[left]
result = now.replace(microsecond=0)
# Match next second
next_second = _lower_bound(seconds, result.second)
if next_second is None:
# No second to match in this minute. Roll-over to next minute.
next_second = seconds[0]
result += dt.timedelta(minutes=1)
result = result.replace(second=next_second)
# Match next minute
next_minute = _lower_bound(minutes, result.minute)
if next_minute != result.minute:
# We're in the next minute. Seconds needs to be reset.
result = result.replace(second=seconds[0])
if next_minute is None:
# No minute to match in this hour. Roll-over to next hour.
next_minute = minutes[0]
result += dt.timedelta(hours=1)
result = result.replace(minute=next_minute)
# Match next hour
next_hour = _lower_bound(hours, result.hour)
if next_hour != result.hour:
# We're in the next hour. Seconds+minutes needs to be reset.
result = result.replace(second=seconds[0], minute=minutes[0])
if next_hour is None:
        # No hour to match in this day. Roll-over to next day.
next_hour = hours[0]
result += dt.timedelta(days=1)
result = result.replace(hour=next_hour)
if result.tzinfo is None:
return result
# Now we need to handle timezones. We will make this datetime object
# "naive" first and then re-convert it to the target timezone.
# This is so that we can call pytz's localize and handle DST changes.
tzinfo: pytzinfo.DstTzInfo = result.tzinfo
result = result.replace(tzinfo=None)
try:
result = tzinfo.localize(result, is_dst=None)
except pytzexceptions.AmbiguousTimeError:
# This happens when we're leaving daylight saving time and local
# clocks are rolled back. In this case, we want to trigger
# on both the DST and non-DST time. So when "now" is in the DST
# use the DST-on time, and if not, use the DST-off time.
use_dst = bool(now.dst())
result = tzinfo.localize(result, is_dst=use_dst)
except pytzexceptions.NonExistentTimeError:
# This happens when we're entering daylight saving time and local
# clocks are rolled forward, thus there are local times that do
# not exist. In this case, we want to trigger on the next time
# that *does* exist.
# In the worst case, this will run through all the seconds in the
# time shift, but that's max 3600 operations for once per year
result = result.replace(tzinfo=tzinfo) + dt.timedelta(seconds=1)
return find_next_time_expression_time(result, seconds, minutes, hours)
result_dst = cast(dt.timedelta, result.dst())
now_dst = cast(dt.timedelta, now.dst())
if result_dst >= now_dst:
return result
# Another edge-case when leaving DST:
# When now is in DST and ambiguous *and* the next trigger time we *should*
# trigger is ambiguous and outside DST, the excepts above won't catch it.
# For example: if triggering on 2:30 and now is 28.10.2018 2:30 (in DST)
# we should trigger next on 28.10.2018 2:30 (out of DST), but our
# algorithm above would produce 29.10.2018 2:30 (out of DST)
# Step 1: Check if now is ambiguous
try:
tzinfo.localize(now.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
# Step 2: Check if result of (now - DST) is ambiguous.
check = now - now_dst
check_result = find_next_time_expression_time(check, seconds, minutes, hours)
try:
tzinfo.localize(check_result.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
# OK, edge case does apply. We must override the DST to DST-off
check_result = tzinfo.localize(check_result.replace(tzinfo=None), is_dst=False)
return check_result
|
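A minimal usage sketch for the time helpers above. The import path homeassistant.util.dt is an assumption based on the module docstring, and the sample values are illustrative only.
import homeassistant.util.dt as dt_util  # assumed module path

parsed = dt_util.parse_datetime("2021-03-01 12:30:00+01:00")  # aware datetime, or None if malformed
if parsed is not None:
    print(dt_util.as_utc(parsed))   # converted to UTC
    print(dt_util.get_age(parsed))  # largest whole unit only, e.g. "3 years"

print(dt_util.parse_time("00:20:00"))  # 00:20:00 as a datetime.time, or None on invalid input

# Next local time matching 02:30:00, DST-aware (see find_next_time_expression_time above).
print(dt_util.find_next_time_expression_time(dt_util.now(), seconds=[0], minutes=[30], hours=[2]))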
tokenizeChoice
|
Splits the `choice` into a series of tokens based on
the user's criteria.
If suffix indexing is enabled, the individual tokens
are further broken down and indexed by their suffix offsets. e.g.
'Banana', 'anana', 'nana', 'ana'
|
import re
import pygtrie as trie # type: ignore
from functools import reduce
__ALL__ = ('PrefixTokenizers', 'PrefixSearch')
class PrefixTokenizers:
# This string here is just an arbitrary long string so that
# re.split finds no matches and returns the entire phrase
ENTIRE_PHRASE = '::gooey/tokenization/entire-phrase'
# \s == any whitespace character
WORDS = r'\s'
@classmethod
def REGEX(cls, expression):
return expression
class OperatorType:
AND = 'AND'
OR = 'OR'
class SearchOptions:
def __init__(self,
choice_tokenizer=PrefixTokenizers.ENTIRE_PHRASE,
input_tokenizer=PrefixTokenizers.ENTIRE_PHRASE,
ignore_case=True,
operator='AND',
index_suffix= False,
**kwargs):
self.choice_tokenizer = choice_tokenizer
self.input_tokenizer = input_tokenizer
self.ignore_case = ignore_case
self.operator = operator
self.index_suffix = index_suffix
class PrefixSearch(object):
"""
A trie backed index for quickly finding substrings
in a list of options.
"""
def __init__(self, choices, options={}, *args, **kwargs):
self.choices = sorted(filter(None, choices))
self.options: SearchOptions = SearchOptions(**options)
self.searchtree = self.buildSearchTrie(choices)
def updateChoices(self, choices):
self.choices = sorted(filter(None, choices))
self.searchtree = self.buildSearchTrie(choices)
def findMatches(self, token):
if not token:
return sorted(self.choices)
tokens = self.tokenizeInput(token)
matches = [set(flatten(self._vals(self.searchtree, prefix=t))) for t in tokens]
op = intersection if self.options.operator == 'AND' else union
return sorted(reduce(op, matches))
def tokenizeInput(self, token):
"""
Cleans and tokenizes the user's input.
empty characters and spaces are trimmed to prevent
matching all paths in the index.
"""
return list(filter(None, re.split(self.options.input_tokenizer, self.clean(token))))
# MASKED: tokenizeChoice function (lines 73-90)
def clean(self, text):
return text.lower() if self.options.ignore_case else text
def buildSearchTrie(self, choices):
searchtrie = trie.Trie()
for choice in choices:
for token in self.tokenizeChoice(choice):
if not searchtrie.has_key(token):
searchtrie[token] = []
searchtrie[token].append(choice)
return searchtrie
def _vals(self, searchtrie, **kwargs):
try:
return searchtrie.values(**kwargs)
except KeyError:
return []
def intersection(a, b):
return a.intersection(b)
def union(a, b):
return a.union(b)
def flatten(xs):
return [item for x in xs for item in x]
|
def tokenizeChoice(self, choice):
"""
Splits the `choice` into a series of tokens based on
the user's criteria.
If suffix indexing is enabled, the individual tokens
are further broken down and indexed by their suffix offsets. e.g.
'Banana', 'anana', 'nana', 'ana'
"""
choice_ = self.clean(choice)
tokens = re.split(self.options.choice_tokenizer, choice_)
if self.options.index_suffix:
return [token[i:]
for token in tokens
for i in range(len(token) - 2)]
else:
return tokens
| 73 | 90 |
import re
import pygtrie as trie # type: ignore
from functools import reduce
__ALL__ = ('PrefixTokenizers', 'PrefixSearch')
class PrefixTokenizers:
# This string here is just an arbitrary long string so that
# re.split finds no matches and returns the entire phrase
ENTIRE_PHRASE = '::gooey/tokenization/entire-phrase'
# \s == any whitespace character
WORDS = r'\s'
@classmethod
def REGEX(cls, expression):
return expression
class OperatorType:
AND = 'AND'
OR = 'OR'
class SearchOptions:
def __init__(self,
choice_tokenizer=PrefixTokenizers.ENTIRE_PHRASE,
input_tokenizer=PrefixTokenizers.ENTIRE_PHRASE,
ignore_case=True,
operator='AND',
index_suffix= False,
**kwargs):
self.choice_tokenizer = choice_tokenizer
self.input_tokenizer = input_tokenizer
self.ignore_case = ignore_case
self.operator = operator
self.index_suffix = index_suffix
class PrefixSearch(object):
"""
A trie backed index for quickly finding substrings
in a list of options.
"""
def __init__(self, choices, options={}, *args, **kwargs):
self.choices = sorted(filter(None, choices))
self.options: SearchOptions = SearchOptions(**options)
self.searchtree = self.buildSearchTrie(choices)
def updateChoices(self, choices):
self.choices = sorted(filter(None, choices))
self.searchtree = self.buildSearchTrie(choices)
def findMatches(self, token):
if not token:
return sorted(self.choices)
tokens = self.tokenizeInput(token)
matches = [set(flatten(self._vals(self.searchtree, prefix=t))) for t in tokens]
op = intersection if self.options.operator == 'AND' else union
return sorted(reduce(op, matches))
def tokenizeInput(self, token):
"""
Cleans and tokenizes the user's input.
empty characters and spaces are trimmed to prevent
matching all paths in the index.
"""
return list(filter(None, re.split(self.options.input_tokenizer, self.clean(token))))
def tokenizeChoice(self, choice):
"""
Splits the `choice` into a series of tokens based on
the user's criteria.
If suffix indexing is enabled, the individual tokens
are further broken down and indexed by their suffix offsets. e.g.
'Banana', 'anana', 'nana', 'ana'
"""
choice_ = self.clean(choice)
tokens = re.split(self.options.choice_tokenizer, choice_)
if self.options.index_suffix:
return [token[i:]
for token in tokens
for i in range(len(token) - 2)]
else:
return tokens
def clean(self, text):
return text.lower() if self.options.ignore_case else text
def buildSearchTrie(self, choices):
searchtrie = trie.Trie()
for choice in choices:
for token in self.tokenizeChoice(choice):
if not searchtrie.has_key(token):
searchtrie[token] = []
searchtrie[token].append(choice)
return searchtrie
def _vals(self, searchtrie, **kwargs):
try:
return searchtrie.values(**kwargs)
except KeyError:
return []
def intersection(a, b):
return a.intersection(b)
def union(a, b):
return a.union(b)
def flatten(xs):
return [item for x in xs for item in x]
|
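A minimal usage sketch for the PrefixSearch index above, showing how suffix indexing lets a mid-word query match. The import path is an assumption (adjust it to wherever this module lives in the Gooey tree), and pygtrie must be installed.
from gooey.gui.components.filtering.prefix_filter import PrefixSearch  # assumed path

search = PrefixSearch(
    ['Banana', 'apple', 'Cherry'],
    options={'ignore_case': True, 'index_suffix': True},
)
print(search.findMatches('nan'))  # ['Banana'] -- matched via the indexed suffix 'nana'
print(search.findMatches(''))     # empty input returns every choice, sorted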
list_combinations_generator
|
Generates combinations for items in the given list.
Args:
modalities: List of modalities available in the dataset.
Returns:
Combinations of items in the given list.
|
# authors_name = 'Preetham Ganesh'
# project_title = 'Multi Sensor-based Human Activity Recognition using OpenCV and Sensor Fusion'
# email = '[email protected]'
import numpy as np
import os
import pandas as pd
import itertools
import logging
import sklearn.pipeline
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import ParameterGrid
logging.getLogger('sklearn').setLevel(logging.FATAL)
# MASKED: list_combinations_generator function (lines 29-53)
def data_combiner(n_actions: int,
subject_ids: list,
n_takes: int,
modalities: list,
skeleton_pose_model: str):
"""Combines skeleton point information for all actions, all takes, given list of subject ids and given list of
modalities.
Args:
n_actions: Total number of actions in the original dataset.
subject_ids: List of subjects in the current set.
n_takes: Total number of takes in the original dataset.
modalities: Current combination of modalities.
skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point
information.
Returns:
A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list
of subject ids and given list of modalities.
"""
combined_modality_skeleton_information = pd.DataFrame()
# Iterates across actions, subject_ids, takes, and modalities to combine skeleton point information.
for i in range(1, n_actions + 1):
for j in range(len(subject_ids)):
for k in range(1, n_takes + 1):
data_name = 'a{}_s{}_t{}'.format(i, subject_ids[j], k)
# Iterates across modalities to import skeleton point information file and adds it to
# combined_modality_skeleton_information. If file not found, it moves on to the next combination.
try:
# Imports 1st modality's skeleton point information for current data_name and skeleton_pose_model.
current_data_name_modality_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[0], data_name, skeleton_pose_model))
except FileNotFoundError:
continue
                # Since the number of modalities differs across combinations, if more than one modality
                # is present, the skeleton point information imported for the other modalities is merged
                # with the skeleton point information for the first modality.
if len(modalities) != 1:
for m in range(1, len(modalities)):
current_skeleton_point_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[m], data_name, skeleton_pose_model))
current_data_name_modality_information = pd.merge(current_data_name_modality_information,
current_skeleton_point_information,
on='frame', how='outer')
# Adds data_name to the imported skeleton point information.
current_data_name_modality_information['data_name'] = [data_name for _ in range(len(
current_data_name_modality_information))]
# Removes frame column from the imported skeleton point information.
current_data_name_modality_information = current_data_name_modality_information.drop(columns=['frame'])
# Adds action column to the imported skeleton point information.
current_data_name_modality_information['action'] = [i for _ in range(len(
current_data_name_modality_information))]
# Appends currently imported & modified skeleton point information to the combined modality skeleton
# point information
combined_modality_skeleton_information = combined_modality_skeleton_information.append(
current_data_name_modality_information)
return combined_modality_skeleton_information
def calculate_metrics(actual_values: np.ndarray,
predicted_values: np.ndarray):
"""Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,
and f1 scores.
Args:
actual_values: Actual action labels in the dataset
predicted_values: Action labels predicted by the currently trained model
Returns:
Dictionary contains keys as score names and values as scores which are floating point values.
"""
return {'accuracy_score': round(accuracy_score(actual_values, predicted_values) * 100, 3),
'balanced_accuracy_score': round(balanced_accuracy_score(actual_values, predicted_values) * 100, 3),
'precision_score': round(precision_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'recall_score': round(recall_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'f1_score': round(f1_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3)}
def retrieve_hyperparameters(current_model_name: str):
"""Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).
Args:
current_model_name: Name of the model currently expected to be trained
Returns:
A dictionary containing the hyperparameter name and the values that will be used to optimize the model
"""
# For support_vector_classifier, the hyperparameter tuned is kernel.
if current_model_name == 'support_vector_classifier':
parameters = {'kernel': ['linear', 'poly', 'rbf']}
# For decision_tree_classifier, the hyperparameters tuned are criterion, splitter, and max_depth.
elif current_model_name == 'decision_tree_classifier':
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 3, 4, 5, 6, 7]}
# For random_forest_classifier or extra_trees_classifier, the hyperparameters tuned are n_estimators, criterion, and
# max_depth
elif current_model_name == 'random_forest_classifier' or current_model_name == 'extra_trees_classifier':
parameters = {'n_estimators': [i * 10 for i in range(2, 11, 2)], 'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 6, 7]}
    # For gradient_boosting_classifier, the hyperparameters tuned are n_estimators and max_depth.
elif current_model_name == 'gradient_boosting_classifier':
parameters = {'max_depth': [2, 3, 4, 5, 6, 7], 'n_estimators': [i * 10 for i in range(2, 11, 2)]}
# For gaussian_naive_bayes, none of the hyperparameters are tuned.
else:
parameters = {'None': ['None']}
return parameters
def split_data_input_target(skeleton_data: pd.DataFrame):
"""Splits skeleton_data into input and target datasets by filtering / selecting certain columns.
Args:
skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.
Returns:
A tuple containing 2 numpy ndarrays for the input and target datasets.
"""
skeleton_data_input = skeleton_data.drop(columns=['data_name', 'action'])
skeleton_data_target = skeleton_data['action']
return np.array(skeleton_data_input), np.array(skeleton_data_target)
def video_based_model_testing(test_skeleton_information: pd.DataFrame,
current_model: sklearn):
"""Tests performance of the currently trained model on the validation or testing sets, where the performance is
evaluated per video / file, instead of evaluating per frame.
Args:
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the validation or testing sets.
current_model: Scikit-learn model that is currently being trained and tested.
Returns:
A tuple contains the target and predicted action for each video in the validation / testing set.
"""
# Identifies unique data_names in the validation / testing set.
test_data_names = np.unique(test_skeleton_information['data_name'])
test_target_data = []
test_predicted_data = []
# Iterates across the identified unique data names
for i in range(len(test_data_names)):
# Filters skeleton point information for the current data name.
current_data_name_skeleton_information = test_skeleton_information[test_skeleton_information['data_name'] ==
test_data_names[i]]
# Splits filtered skeleton point information into input and target data.
test_skeleton_input_data, test_skeleton_target_data = split_data_input_target(
current_data_name_skeleton_information)
# Predicts labels for each frame in the filtered skeleton point information.
test_skeleton_predicted_data = list(current_model.predict(test_skeleton_input_data))
# Identifies which predicted label has highest count and appends it to the final predicted data. Also, appends
# target label to the target data.
test_target_data.append(max(current_data_name_skeleton_information['action']))
test_predicted_data.append(max(test_skeleton_predicted_data, key=test_skeleton_predicted_data.count))
return np.array(test_target_data), np.array(test_predicted_data)
def model_training_testing(train_skeleton_information: pd.DataFrame,
validation_skeleton_information: pd.DataFrame,
test_skeleton_information: pd.DataFrame,
current_model_name: str,
parameters: dict):
"""Trains and validates model for the current model name and hyperparameters on the train_skeleton_informaiton and
validation_skeleton_information.
Args:
train_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Training set.
validation_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Validation set.
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Test set.
current_model_name: Name of the model currently expected to be trained.
parameters: Current parameter values used for training and validating the model.
Returns:
A tuple which contains the training metrics, validation metrics, & test metrics.
"""
# Based on the current_model_name, the scikit-learn object is initialized using the hyperparameter (if necessary)
if current_model_name == 'support_vector_classifier':
model = SVC(kernel=parameters['kernel'])
elif current_model_name == 'decision_tree_classifier':
model = DecisionTreeClassifier(criterion=parameters['criterion'], splitter=parameters['splitter'],
max_depth=parameters['max_depth'])
elif current_model_name == 'random_forest_classifier':
model = RandomForestClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'extra_trees_classifier':
model = ExtraTreesClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'gradient_boosting_classifier':
model = GradientBoostingClassifier(n_estimators=parameters['n_estimators'], max_depth=parameters['max_depth'])
else:
model = GaussianNB()
# Splits Training skeleton information into input and target data.
train_skeleton_input_data, train_skeleton_target_data = split_data_input_target(train_skeleton_information)
# Trains the object created for the model using the training input and target.
model.fit(train_skeleton_input_data, train_skeleton_target_data)
# Predict video based action labels for training and validation skeleton information data.
train_skeleton_target_data, train_skeleton_predicted_data = video_based_model_testing(train_skeleton_information,
model)
validation_skeleton_target_data, validation_skeleton_predicted_data = video_based_model_testing(
validation_skeleton_information, model)
test_skeleton_target_data, test_skeleton_predicted_data = video_based_model_testing(test_skeleton_information, model)
# Calculates metrics for the predicted action labels for the training and testing sets.
train_metrics = calculate_metrics(train_skeleton_target_data, train_skeleton_predicted_data)
validation_metrics = calculate_metrics(validation_skeleton_target_data, validation_skeleton_predicted_data)
test_metrics = calculate_metrics(test_skeleton_target_data, test_skeleton_predicted_data)
return train_metrics, validation_metrics, test_metrics
def per_combination_results_export(combination_name: str,
data_split: str,
metrics_dataframe: pd.DataFrame):
"""Exports the metrics_dataframe into a CSV format to the mentioned data_split folder. If the folder does not exist,
then the folder is created.
Args:
combination_name: Name of the current combination of modalities and skeleton pose model.
data_split: Name of the split the subset of the dataset belongs to.
metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.
Returns:
None.
"""
directory_path = '{}/{}'.format('../results/combination_results', combination_name)
if not os.path.isdir(directory_path):
os.mkdir(directory_path)
file_path = '{}/{}.csv'.format(directory_path, data_split)
metrics_dataframe.to_csv(file_path, index=False)
def appends_parameter_metrics_combination(current_model_name: str,
current_combination_name: str,
current_split_metrics: dict,
split_metrics_dataframe: pd.DataFrame):
"""Appends the metrics for the current model and current parameter combination to the main dataframe.
Args:
current_model_name: Name of the model currently being trained.
current_combination_name: Current combination of parameters used for training the model.
current_split_metrics: Metrics for the current parameter combination for the model.
split_metrics_dataframe: Pandas dataframe which contains metrics for the current combination of modalities.
Returns:
Updated version of the pandas dataframe which contains metrics for the current combination of modalities.
"""
current_split_metrics['model_names'] = current_model_name
current_split_metrics['parameters'] = current_combination_name
split_metrics_dataframe = split_metrics_dataframe.append(current_split_metrics, ignore_index=True)
return split_metrics_dataframe
def per_combination_model_training_testing(train_subject_ids: list,
validation_subject_ids: list,
test_subject_ids: list,
n_actions: int,
n_takes: int,
current_combination_modalities: list,
skeleton_pose_model: str,
model_names: list):
"""Combines skeleton point information based on modality combination, and subject id group. Trains, validates, and
tests the list of classifier models. Calculates metrics for each data split, model and parameter combination.
Args:
train_subject_ids: List of subject ids in the training set.
validation_subject_ids: List of subject ids in the validation set.
test_subject_ids: List of subject ids in the testing set.
n_actions: Total number of actions in the original dataset.
n_takes: Total number of takes in the original dataset.
current_combination_modalities: Current combination of modalities which will be used to import and combine
the dataset.
skeleton_pose_model: Name of the model currently used for extracting skeleton model.
model_names: List of ML classifier model names which will used creating the objects.
Returns:
None.
"""
# Combines skeleton point information based on modality combination, and subject id group.
train_skeleton_information = data_combiner(n_actions, train_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
validation_skeleton_information = data_combiner(n_actions, validation_subject_ids, n_takes,
current_combination_modalities, skeleton_pose_model)
test_skeleton_information = data_combiner(n_actions, test_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
# Creating empty dataframes for the metrics for current modality combination's training, validation, and testing
# datasets.
metrics_features = ['accuracy_score', 'balanced_accuracy_score', 'precision_score', 'recall_score', 'f1_score']
train_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
validation_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
test_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
combination_name = '_'.join(current_combination_modalities + [skeleton_pose_model])
# Iterates across model names and parameter grid for training and testing the classification models.
for i in range(len(model_names)):
# Retrieves parameters and generates parameter combinations.
parameters = retrieve_hyperparameters(model_names[i])
parameters_grid = ParameterGrid(parameters)
for j in range(len(parameters_grid)):
current_parameters_grid_name = ', '.join(['{}={}'.format(k, parameters_grid[j][k]) for k in
parameters_grid[j].keys()])
# Performs model training and testing. Also, generates metrics for the data splits.
training_metrics, validation_metrics, test_metrics = model_training_testing(
train_skeleton_information, validation_skeleton_information, test_skeleton_information, model_names[i],
parameters_grid[j])
# Appends current modality's train, validation, and test metrics to the main dataframes.
train_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, training_metrics, train_models_parameters_metrics)
validation_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, validation_metrics, validation_models_parameters_metrics)
test_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, test_metrics, test_models_parameters_metrics)
if model_names[i] != 'gaussian_naive_bayes':
print('modality_combination={}, model={}, {} completed successfully.'.format(
combination_name, model_names[i], current_parameters_grid_name))
else:
print('modality_combination={}, model={} completed successfully.'.format(combination_name,
model_names[i]))
# Exports main training, validation and testing metrics into CSV files.
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'train_metrics',
train_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]),
'validation_metrics', validation_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'test_metrics',
test_models_parameters_metrics)
def main():
print()
n_actions = 27
n_subjects = 8
n_takes = 4
skeleton_pose_models = ['coco', 'mpi']
modalities = ['rgb', 'depth', 'inertial']
model_names = ['gaussian_naive_bayes', 'support_vector_classifier', 'decision_tree_classifier',
'random_forest_classifier', 'extra_trees_classifier', 'gradient_boosting_classifier']
modality_combinations = list_combinations_generator(modalities)
train_subject_ids = [i for i in range(1, n_subjects - 1)]
validation_subject_ids = [n_subjects - 1]
test_subject_ids = [n_subjects]
for i in range(len(modality_combinations)):
for j in range(len(skeleton_pose_models)):
per_combination_model_training_testing(train_subject_ids, validation_subject_ids, test_subject_ids,
n_actions, n_takes, modality_combinations[i],
skeleton_pose_models[j], model_names)
print()
if __name__ == '__main__':
main()
|
def list_combinations_generator(modalities: list):
"""Generates combinations for items in the given list.
Args:
modalities: List of modalities available in the dataset.
Returns:
Combinations of items in the given list.
"""
modality_combinations = list()
# Iterates across modalities to generate combinations based on length.
for length in range(1, len(modalities) + 1):
# Generate combinations for the current length.
current_length_combinations = itertools.combinations(modalities, length)
# Iterates across the generated combinations to convert it into a list.
for combination in current_length_combinations:
current_combination_list = list()
for k in combination:
current_combination_list.append(k)
modality_combinations.append(current_combination_list)
return modality_combinations
| 29 | 53 |
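A quick illustration of the combinations produced by the list_combinations_generator implementation above, rebuilt with itertools directly so it runs standalone; the modality names are the ones used later in main().
import itertools

modalities = ['rgb', 'depth', 'inertial']
combos = [list(c) for length in range(1, len(modalities) + 1)
          for c in itertools.combinations(modalities, length)]
print(combos)
# [['rgb'], ['depth'], ['inertial'], ['rgb', 'depth'], ['rgb', 'inertial'],
#  ['depth', 'inertial'], ['rgb', 'depth', 'inertial']]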
# authors_name = 'Preetham Ganesh'
# project_title = 'Multi Sensor-based Human Activity Recognition using OpenCV and Sensor Fusion'
# email = '[email protected]'
import numpy as np
import os
import pandas as pd
import itertools
import logging
import sklearn.pipeline
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import ParameterGrid
logging.getLogger('sklearn').setLevel(logging.FATAL)
def list_combinations_generator(modalities: list):
"""Generates combinations for items in the given list.
Args:
modalities: List of modalities available in the dataset.
Returns:
Combinations of items in the given list.
"""
modality_combinations = list()
# Iterates across modalities to generate combinations based on length.
for length in range(1, len(modalities) + 1):
# Generate combinations for the current length.
current_length_combinations = itertools.combinations(modalities, length)
# Iterates across the generated combinations to convert it into a list.
for combination in current_length_combinations:
current_combination_list = list()
for k in combination:
current_combination_list.append(k)
modality_combinations.append(current_combination_list)
return modality_combinations
def data_combiner(n_actions: int,
subject_ids: list,
n_takes: int,
modalities: list,
skeleton_pose_model: str):
"""Combines skeleton point information for all actions, all takes, given list of subject ids and given list of
modalities.
Args:
n_actions: Total number of actions in the original dataset.
subject_ids: List of subjects in the current set.
n_takes: Total number of takes in the original dataset.
modalities: Current combination of modalities.
skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point
information.
Returns:
A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list
of subject ids and given list of modalities.
"""
combined_modality_skeleton_information = pd.DataFrame()
# Iterates across actions, subject_ids, takes, and modalities to combine skeleton point information.
for i in range(1, n_actions + 1):
for j in range(len(subject_ids)):
for k in range(1, n_takes + 1):
data_name = 'a{}_s{}_t{}'.format(i, subject_ids[j], k)
# Iterates across modalities to import skeleton point information file and adds it to
# combined_modality_skeleton_information. If file not found, it moves on to the next combination.
try:
# Imports 1st modality's skeleton point information for current data_name and skeleton_pose_model.
current_data_name_modality_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[0], data_name, skeleton_pose_model))
except FileNotFoundError:
continue
                # Since the number of modalities differs across combinations, if more than one modality
                # is present, the skeleton point information imported for the other modalities is merged
                # with the skeleton point information for the first modality.
if len(modalities) != 1:
for m in range(1, len(modalities)):
current_skeleton_point_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[m], data_name, skeleton_pose_model))
current_data_name_modality_information = pd.merge(current_data_name_modality_information,
current_skeleton_point_information,
on='frame', how='outer')
# Adds data_name to the imported skeleton point information.
current_data_name_modality_information['data_name'] = [data_name for _ in range(len(
current_data_name_modality_information))]
# Removes frame column from the imported skeleton point information.
current_data_name_modality_information = current_data_name_modality_information.drop(columns=['frame'])
# Adds action column to the imported skeleton point information.
current_data_name_modality_information['action'] = [i for _ in range(len(
current_data_name_modality_information))]
# Appends currently imported & modified skeleton point information to the combined modality skeleton
# point information
combined_modality_skeleton_information = combined_modality_skeleton_information.append(
current_data_name_modality_information)
return combined_modality_skeleton_information
def calculate_metrics(actual_values: np.ndarray,
predicted_values: np.ndarray):
"""Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,
and f1 scores.
Args:
actual_values: Actual action labels in the dataset
predicted_values: Action labels predicted by the currently trained model
Returns:
Dictionary contains keys as score names and values as scores which are floating point values.
"""
return {'accuracy_score': round(accuracy_score(actual_values, predicted_values) * 100, 3),
'balanced_accuracy_score': round(balanced_accuracy_score(actual_values, predicted_values) * 100, 3),
'precision_score': round(precision_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'recall_score': round(recall_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'f1_score': round(f1_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3)}
def retrieve_hyperparameters(current_model_name: str):
"""Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).
Args:
current_model_name: Name of the model currently expected to be trained
Returns:
A dictionary containing the hyperparameter name and the values that will be used to optimize the model
"""
# For support_vector_classifier, the hyperparameter tuned is kernel.
if current_model_name == 'support_vector_classifier':
parameters = {'kernel': ['linear', 'poly', 'rbf']}
# For decision_tree_classifier, the hyperparameters tuned are criterion, splitter, and max_depth.
elif current_model_name == 'decision_tree_classifier':
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 3, 4, 5, 6, 7]}
# For random_forest_classifier or extra_trees_classifier, the hyperparameters tuned are n_estimators, criterion, and
# max_depth
elif current_model_name == 'random_forest_classifier' or current_model_name == 'extra_trees_classifier':
parameters = {'n_estimators': [i * 10 for i in range(2, 11, 2)], 'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 6, 7]}
    # For gradient_boosting_classifier, the hyperparameters tuned are n_estimators and max_depth.
elif current_model_name == 'gradient_boosting_classifier':
parameters = {'max_depth': [2, 3, 4, 5, 6, 7], 'n_estimators': [i * 10 for i in range(2, 11, 2)]}
# For gaussian_naive_bayes, none of the hyperparameters are tuned.
else:
parameters = {'None': ['None']}
return parameters
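# Hedged illustration (added for clarity, not part of the original pipeline; the helper name is
# hypothetical): the hyperparameter dictionary returned above is expanded into individual settings
# with scikit-learn's ParameterGrid, mirroring the loop used later in
# per_combination_model_training_testing.
def _example_parameter_grid_usage():
    parameters = retrieve_hyperparameters('decision_tree_classifier')
    for combination in ParameterGrid(parameters):
        print(combination)  # e.g. {'criterion': 'gini', 'max_depth': 2, 'splitter': 'best'}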
def split_data_input_target(skeleton_data: pd.DataFrame):
"""Splits skeleton_data into input and target datasets by filtering / selecting certain columns.
Args:
skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.
Returns:
A tuple containing 2 numpy ndarrays for the input and target datasets.
"""
skeleton_data_input = skeleton_data.drop(columns=['data_name', 'action'])
skeleton_data_target = skeleton_data['action']
return np.array(skeleton_data_input), np.array(skeleton_data_target)
def video_based_model_testing(test_skeleton_information: pd.DataFrame,
current_model: sklearn):
"""Tests performance of the currently trained model on the validation or testing sets, where the performance is
evaluated per video / file, instead of evaluating per frame.
Args:
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the validation or testing sets.
current_model: Scikit-learn model that is currently being trained and tested.
Returns:
        A tuple containing the target and predicted action for each video in the validation / testing set.
"""
# Identifies unique data_names in the validation / testing set.
test_data_names = np.unique(test_skeleton_information['data_name'])
test_target_data = []
test_predicted_data = []
# Iterates across the identified unique data names
for i in range(len(test_data_names)):
# Filters skeleton point information for the current data name.
current_data_name_skeleton_information = test_skeleton_information[test_skeleton_information['data_name'] ==
test_data_names[i]]
# Splits filtered skeleton point information into input and target data.
test_skeleton_input_data, test_skeleton_target_data = split_data_input_target(
current_data_name_skeleton_information)
# Predicts labels for each frame in the filtered skeleton point information.
test_skeleton_predicted_data = list(current_model.predict(test_skeleton_input_data))
# Identifies which predicted label has highest count and appends it to the final predicted data. Also, appends
# target label to the target data.
test_target_data.append(max(current_data_name_skeleton_information['action']))
test_predicted_data.append(max(test_skeleton_predicted_data, key=test_skeleton_predicted_data.count))
return np.array(test_target_data), np.array(test_predicted_data)
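# Hedged note (added for clarity, not part of the original pipeline; the helper name is hypothetical):
# the per-video label above is the most frequent frame-level prediction; collections.Counter gives an
# equivalent majority vote (ties aside) and is shown only as an illustrative alternative.
def _example_majority_vote(frame_predictions: list):
    import collections
    return collections.Counter(frame_predictions).most_common(1)[0][0]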
def model_training_testing(train_skeleton_information: pd.DataFrame,
validation_skeleton_information: pd.DataFrame,
test_skeleton_information: pd.DataFrame,
current_model_name: str,
parameters: dict):
"""Trains and validates model for the current model name and hyperparameters on the train_skeleton_informaiton and
validation_skeleton_information.
Args:
train_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Training set.
validation_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Validation set.
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Test set.
current_model_name: Name of the model currently expected to be trained.
parameters: Current parameter values used for training and validating the model.
Returns:
A tuple which contains the training metrics, validation metrics, & test metrics.
"""
# Based on the current_model_name, the scikit-learn object is initialized using the hyperparameter (if necessary)
if current_model_name == 'support_vector_classifier':
model = SVC(kernel=parameters['kernel'])
elif current_model_name == 'decision_tree_classifier':
model = DecisionTreeClassifier(criterion=parameters['criterion'], splitter=parameters['splitter'],
max_depth=parameters['max_depth'])
elif current_model_name == 'random_forest_classifier':
model = RandomForestClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'extra_trees_classifier':
model = ExtraTreesClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'gradient_boosting_classifier':
model = GradientBoostingClassifier(n_estimators=parameters['n_estimators'], max_depth=parameters['max_depth'])
else:
model = GaussianNB()
# Splits Training skeleton information into input and target data.
train_skeleton_input_data, train_skeleton_target_data = split_data_input_target(train_skeleton_information)
# Trains the object created for the model using the training input and target.
model.fit(train_skeleton_input_data, train_skeleton_target_data)
    # Predicts video-based action labels for the training, validation, and testing skeleton information data.
train_skeleton_target_data, train_skeleton_predicted_data = video_based_model_testing(train_skeleton_information,
model)
validation_skeleton_target_data, validation_skeleton_predicted_data = video_based_model_testing(
validation_skeleton_information, model)
test_skeleton_target_data, test_skeleton_predicted_data = video_based_model_testing(test_skeleton_information, model)
    # Calculates metrics for the predicted action labels for the training, validation, and testing sets.
train_metrics = calculate_metrics(train_skeleton_target_data, train_skeleton_predicted_data)
validation_metrics = calculate_metrics(validation_skeleton_target_data, validation_skeleton_predicted_data)
test_metrics = calculate_metrics(test_skeleton_target_data, test_skeleton_predicted_data)
return train_metrics, validation_metrics, test_metrics
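# Hedged usage sketch (added for illustration, not part of the original pipeline; the helper name and
# the chosen hyperparameter values are hypothetical): a single run of model_training_testing with one
# hand-picked decision tree setting; the three returned dictionaries hold the train, validation, and
# test metrics for that setting.
def _example_single_model_run(train_information, validation_information, test_information):
    parameters = {'criterion': 'gini', 'splitter': 'best', 'max_depth': 4}
    return model_training_testing(train_information, validation_information, test_information,
                                  'decision_tree_classifier', parameters)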
def per_combination_results_export(combination_name: str,
data_split: str,
metrics_dataframe: pd.DataFrame):
"""Exports the metrics_dataframe into a CSV format to the mentioned data_split folder. If the folder does not exist,
then the folder is created.
Args:
combination_name: Name of the current combination of modalities and skeleton pose model.
data_split: Name of the split the subset of the dataset belongs to.
metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.
Returns:
None.
"""
directory_path = '{}/{}'.format('../results/combination_results', combination_name)
if not os.path.isdir(directory_path):
os.mkdir(directory_path)
file_path = '{}/{}.csv'.format(directory_path, data_split)
metrics_dataframe.to_csv(file_path, index=False)
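# Hedged alternative sketch (an assumption, not the original behaviour; the helper name is
# hypothetical): os.makedirs with exist_ok=True would also create the parent
# '../results/combination_results' folder when it is missing, which the os.mkdir call above does not.
def _example_results_export_with_makedirs(combination_name: str, data_split: str,
                                          metrics_dataframe: pd.DataFrame):
    directory_path = '{}/{}'.format('../results/combination_results', combination_name)
    os.makedirs(directory_path, exist_ok=True)
    metrics_dataframe.to_csv('{}/{}.csv'.format(directory_path, data_split), index=False)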
def appends_parameter_metrics_combination(current_model_name: str,
current_combination_name: str,
current_split_metrics: dict,
split_metrics_dataframe: pd.DataFrame):
"""Appends the metrics for the current model and current parameter combination to the main dataframe.
Args:
current_model_name: Name of the model currently being trained.
current_combination_name: Current combination of parameters used for training the model.
current_split_metrics: Metrics for the current parameter combination for the model.
split_metrics_dataframe: Pandas dataframe which contains metrics for the current combination of modalities.
Returns:
Updated version of the pandas dataframe which contains metrics for the current combination of modalities.
"""
current_split_metrics['model_names'] = current_model_name
current_split_metrics['parameters'] = current_combination_name
split_metrics_dataframe = split_metrics_dataframe.append(current_split_metrics, ignore_index=True)
return split_metrics_dataframe
def per_combination_model_training_testing(train_subject_ids: list,
validation_subject_ids: list,
test_subject_ids: list,
n_actions: int,
n_takes: int,
current_combination_modalities: list,
skeleton_pose_model: str,
model_names: list):
"""Combines skeleton point information based on modality combination, and subject id group. Trains, validates, and
tests the list of classifier models. Calculates metrics for each data split, model and parameter combination.
Args:
train_subject_ids: List of subject ids in the training set.
validation_subject_ids: List of subject ids in the validation set.
test_subject_ids: List of subject ids in the testing set.
n_actions: Total number of actions in the original dataset.
n_takes: Total number of takes in the original dataset.
current_combination_modalities: Current combination of modalities which will be used to import and combine
the dataset.
        skeleton_pose_model: Name of the pose model currently used for extracting skeleton point information.
        model_names: List of ML classifier model names which will be used for creating the model objects.
Returns:
None.
"""
# Combines skeleton point information based on modality combination, and subject id group.
train_skeleton_information = data_combiner(n_actions, train_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
validation_skeleton_information = data_combiner(n_actions, validation_subject_ids, n_takes,
current_combination_modalities, skeleton_pose_model)
test_skeleton_information = data_combiner(n_actions, test_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
# Creating empty dataframes for the metrics for current modality combination's training, validation, and testing
# datasets.
metrics_features = ['accuracy_score', 'balanced_accuracy_score', 'precision_score', 'recall_score', 'f1_score']
train_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
validation_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
test_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
combination_name = '_'.join(current_combination_modalities + [skeleton_pose_model])
# Iterates across model names and parameter grid for training and testing the classification models.
for i in range(len(model_names)):
# Retrieves parameters and generates parameter combinations.
parameters = retrieve_hyperparameters(model_names[i])
parameters_grid = ParameterGrid(parameters)
for j in range(len(parameters_grid)):
current_parameters_grid_name = ', '.join(['{}={}'.format(k, parameters_grid[j][k]) for k in
parameters_grid[j].keys()])
# Performs model training and testing. Also, generates metrics for the data splits.
training_metrics, validation_metrics, test_metrics = model_training_testing(
train_skeleton_information, validation_skeleton_information, test_skeleton_information, model_names[i],
parameters_grid[j])
# Appends current modality's train, validation, and test metrics to the main dataframes.
train_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, training_metrics, train_models_parameters_metrics)
validation_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, validation_metrics, validation_models_parameters_metrics)
test_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, test_metrics, test_models_parameters_metrics)
if model_names[i] != 'gaussian_naive_bayes':
print('modality_combination={}, model={}, {} completed successfully.'.format(
combination_name, model_names[i], current_parameters_grid_name))
else:
print('modality_combination={}, model={} completed successfully.'.format(combination_name,
model_names[i]))
# Exports main training, validation and testing metrics into CSV files.
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'train_metrics',
train_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]),
'validation_metrics', validation_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'test_metrics',
test_models_parameters_metrics)
def main():
print()
n_actions = 27
n_subjects = 8
n_takes = 4
skeleton_pose_models = ['coco', 'mpi']
modalities = ['rgb', 'depth', 'inertial']
model_names = ['gaussian_naive_bayes', 'support_vector_classifier', 'decision_tree_classifier',
'random_forest_classifier', 'extra_trees_classifier', 'gradient_boosting_classifier']
modality_combinations = list_combinations_generator(modalities)
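    # With n_subjects = 8, this subject-wise split assigns subjects 1-6 to training, subject 7 to
    # validation, and subject 8 to testing.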
train_subject_ids = [i for i in range(1, n_subjects - 1)]
validation_subject_ids = [n_subjects - 1]
test_subject_ids = [n_subjects]
for i in range(len(modality_combinations)):
for j in range(len(skeleton_pose_models)):
per_combination_model_training_testing(train_subject_ids, validation_subject_ids, test_subject_ids,
n_actions, n_takes, modality_combinations[i],
skeleton_pose_models[j], model_names)
print()
if __name__ == '__main__':
main()
|
data_combiner
|
Combines skeleton point information for all actions, all takes, given list of subject ids and given list of
modalities.
Args:
n_actions: Total number of actions in the original dataset.
subject_ids: List of subjects in the current set.
n_takes: Total number of takes in the original dataset.
modalities: Current combination of modalities.
skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point
information.
Returns:
A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list
of subject ids and given list of modalities.
|
# authors_name = 'Preetham Ganesh'
# project_title = 'Multi Sensor-based Human Activity Recognition using OpenCV and Sensor Fusion'
# email = '[email protected]'
import numpy as np
import os
import pandas as pd
import itertools
import logging
import sklearn.pipeline
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import ParameterGrid
logging.getLogger('sklearn').setLevel(logging.FATAL)
def list_combinations_generator(modalities: list):
"""Generates combinations for items in the given list.
Args:
modalities: List of modalities available in the dataset.
Returns:
Combinations of items in the given list.
"""
modality_combinations = list()
# Iterates across modalities to generate combinations based on length.
for length in range(1, len(modalities) + 1):
# Generate combinations for the current length.
current_length_combinations = itertools.combinations(modalities, length)
# Iterates across the generated combinations to convert it into a list.
for combination in current_length_combinations:
current_combination_list = list()
for k in combination:
current_combination_list.append(k)
modality_combinations.append(current_combination_list)
return modality_combinations
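# Hedged illustration (added for clarity, not part of the original pipeline; the helper name is
# hypothetical): for the three modalities used in main(), the generator above yields seven
# combinations, from the single modalities up to the full ['rgb', 'depth', 'inertial'] set.
def _example_modality_combinations():
    for combination in list_combinations_generator(['rgb', 'depth', 'inertial']):
        print(combination)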
# MASKED: data_combiner function (lines 56-121)
def calculate_metrics(actual_values: np.ndarray,
predicted_values: np.ndarray):
"""Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,
and f1 scores.
Args:
actual_values: Actual action labels in the dataset
predicted_values: Action labels predicted by the currently trained model
Returns:
Dictionary contains keys as score names and values as scores which are floating point values.
"""
return {'accuracy_score': round(accuracy_score(actual_values, predicted_values) * 100, 3),
'balanced_accuracy_score': round(balanced_accuracy_score(actual_values, predicted_values) * 100, 3),
'precision_score': round(precision_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'recall_score': round(recall_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'f1_score': round(f1_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3)}
def retrieve_hyperparameters(current_model_name: str):
"""Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).
Args:
current_model_name: Name of the model currently expected to be trained
Returns:
A dictionary containing the hyperparameter name and the values that will be used to optimize the model
"""
# For support_vector_classifier, the hyperparameter tuned is kernel.
if current_model_name == 'support_vector_classifier':
parameters = {'kernel': ['linear', 'poly', 'rbf']}
# For decision_tree_classifier, the hyperparameters tuned are criterion, splitter, and max_depth.
elif current_model_name == 'decision_tree_classifier':
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 3, 4, 5, 6, 7]}
# For random_forest_classifier or extra_trees_classifier, the hyperparameters tuned are n_estimators, criterion, and
# max_depth
elif current_model_name == 'random_forest_classifier' or current_model_name == 'extra_trees_classifier':
parameters = {'n_estimators': [i * 10 for i in range(2, 11, 2)], 'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 6, 7]}
    # For gradient_boosting_classifier, the hyperparameters tuned are n_estimators and max_depth.
elif current_model_name == 'gradient_boosting_classifier':
parameters = {'max_depth': [2, 3, 4, 5, 6, 7], 'n_estimators': [i * 10 for i in range(2, 11, 2)]}
# For gaussian_naive_bayes, none of the hyperparameters are tuned.
else:
parameters = {'None': ['None']}
return parameters
def split_data_input_target(skeleton_data: pd.DataFrame):
"""Splits skeleton_data into input and target datasets by filtering / selecting certain columns.
Args:
skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.
Returns:
A tuple containing 2 numpy ndarrays for the input and target datasets.
"""
skeleton_data_input = skeleton_data.drop(columns=['data_name', 'action'])
skeleton_data_target = skeleton_data['action']
return np.array(skeleton_data_input), np.array(skeleton_data_target)
def video_based_model_testing(test_skeleton_information: pd.DataFrame,
current_model: sklearn):
"""Tests performance of the currently trained model on the validation or testing sets, where the performance is
evaluated per video / file, instead of evaluating per frame.
Args:
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the validation or testing sets.
current_model: Scikit-learn model that is currently being trained and tested.
Returns:
        A tuple containing the target and predicted action for each video in the validation / testing set.
"""
# Identifies unique data_names in the validation / testing set.
test_data_names = np.unique(test_skeleton_information['data_name'])
test_target_data = []
test_predicted_data = []
# Iterates across the identified unique data names
for i in range(len(test_data_names)):
# Filters skeleton point information for the current data name.
current_data_name_skeleton_information = test_skeleton_information[test_skeleton_information['data_name'] ==
test_data_names[i]]
# Splits filtered skeleton point information into input and target data.
test_skeleton_input_data, test_skeleton_target_data = split_data_input_target(
current_data_name_skeleton_information)
# Predicts labels for each frame in the filtered skeleton point information.
test_skeleton_predicted_data = list(current_model.predict(test_skeleton_input_data))
# Identifies which predicted label has highest count and appends it to the final predicted data. Also, appends
# target label to the target data.
test_target_data.append(max(current_data_name_skeleton_information['action']))
test_predicted_data.append(max(test_skeleton_predicted_data, key=test_skeleton_predicted_data.count))
return np.array(test_target_data), np.array(test_predicted_data)
def model_training_testing(train_skeleton_information: pd.DataFrame,
validation_skeleton_information: pd.DataFrame,
test_skeleton_information: pd.DataFrame,
current_model_name: str,
parameters: dict):
"""Trains and validates model for the current model name and hyperparameters on the train_skeleton_informaiton and
validation_skeleton_information.
Args:
train_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Training set.
validation_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Validation set.
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Test set.
current_model_name: Name of the model currently expected to be trained.
parameters: Current parameter values used for training and validating the model.
Returns:
A tuple which contains the training metrics, validation metrics, & test metrics.
"""
# Based on the current_model_name, the scikit-learn object is initialized using the hyperparameter (if necessary)
if current_model_name == 'support_vector_classifier':
model = SVC(kernel=parameters['kernel'])
elif current_model_name == 'decision_tree_classifier':
model = DecisionTreeClassifier(criterion=parameters['criterion'], splitter=parameters['splitter'],
max_depth=parameters['max_depth'])
elif current_model_name == 'random_forest_classifier':
model = RandomForestClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'extra_trees_classifier':
model = ExtraTreesClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'gradient_boosting_classifier':
model = GradientBoostingClassifier(n_estimators=parameters['n_estimators'], max_depth=parameters['max_depth'])
else:
model = GaussianNB()
# Splits Training skeleton information into input and target data.
train_skeleton_input_data, train_skeleton_target_data = split_data_input_target(train_skeleton_information)
# Trains the object created for the model using the training input and target.
model.fit(train_skeleton_input_data, train_skeleton_target_data)
    # Predicts video-based action labels for the training, validation, and testing skeleton information data.
train_skeleton_target_data, train_skeleton_predicted_data = video_based_model_testing(train_skeleton_information,
model)
validation_skeleton_target_data, validation_skeleton_predicted_data = video_based_model_testing(
validation_skeleton_information, model)
test_skeleton_target_data, test_skeleton_predicted_data = video_based_model_testing(test_skeleton_information, model)
    # Calculates metrics for the predicted action labels for the training, validation, and testing sets.
train_metrics = calculate_metrics(train_skeleton_target_data, train_skeleton_predicted_data)
validation_metrics = calculate_metrics(validation_skeleton_target_data, validation_skeleton_predicted_data)
test_metrics = calculate_metrics(test_skeleton_target_data, test_skeleton_predicted_data)
return train_metrics, validation_metrics, test_metrics
def per_combination_results_export(combination_name: str,
data_split: str,
metrics_dataframe: pd.DataFrame):
"""Exports the metrics_dataframe into a CSV format to the mentioned data_split folder. If the folder does not exist,
then the folder is created.
Args:
combination_name: Name of the current combination of modalities and skeleton pose model.
data_split: Name of the split the subset of the dataset belongs to.
metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.
Returns:
None.
"""
directory_path = '{}/{}'.format('../results/combination_results', combination_name)
if not os.path.isdir(directory_path):
os.mkdir(directory_path)
file_path = '{}/{}.csv'.format(directory_path, data_split)
metrics_dataframe.to_csv(file_path, index=False)
def appends_parameter_metrics_combination(current_model_name: str,
current_combination_name: str,
current_split_metrics: dict,
split_metrics_dataframe: pd.DataFrame):
"""Appends the metrics for the current model and current parameter combination to the main dataframe.
Args:
current_model_name: Name of the model currently being trained.
current_combination_name: Current combination of parameters used for training the model.
current_split_metrics: Metrics for the current parameter combination for the model.
split_metrics_dataframe: Pandas dataframe which contains metrics for the current combination of modalities.
Returns:
Updated version of the pandas dataframe which contains metrics for the current combination of modalities.
"""
current_split_metrics['model_names'] = current_model_name
current_split_metrics['parameters'] = current_combination_name
split_metrics_dataframe = split_metrics_dataframe.append(current_split_metrics, ignore_index=True)
return split_metrics_dataframe
def per_combination_model_training_testing(train_subject_ids: list,
validation_subject_ids: list,
test_subject_ids: list,
n_actions: int,
n_takes: int,
current_combination_modalities: list,
skeleton_pose_model: str,
model_names: list):
"""Combines skeleton point information based on modality combination, and subject id group. Trains, validates, and
tests the list of classifier models. Calculates metrics for each data split, model and parameter combination.
Args:
train_subject_ids: List of subject ids in the training set.
validation_subject_ids: List of subject ids in the validation set.
test_subject_ids: List of subject ids in the testing set.
n_actions: Total number of actions in the original dataset.
n_takes: Total number of takes in the original dataset.
current_combination_modalities: Current combination of modalities which will be used to import and combine
the dataset.
        skeleton_pose_model: Name of the pose model currently used for extracting skeleton point information.
        model_names: List of ML classifier model names which will be used for creating the model objects.
Returns:
None.
"""
# Combines skeleton point information based on modality combination, and subject id group.
train_skeleton_information = data_combiner(n_actions, train_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
validation_skeleton_information = data_combiner(n_actions, validation_subject_ids, n_takes,
current_combination_modalities, skeleton_pose_model)
test_skeleton_information = data_combiner(n_actions, test_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
# Creating empty dataframes for the metrics for current modality combination's training, validation, and testing
# datasets.
metrics_features = ['accuracy_score', 'balanced_accuracy_score', 'precision_score', 'recall_score', 'f1_score']
train_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
validation_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
test_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
combination_name = '_'.join(current_combination_modalities + [skeleton_pose_model])
# Iterates across model names and parameter grid for training and testing the classification models.
for i in range(len(model_names)):
# Retrieves parameters and generates parameter combinations.
parameters = retrieve_hyperparameters(model_names[i])
parameters_grid = ParameterGrid(parameters)
for j in range(len(parameters_grid)):
current_parameters_grid_name = ', '.join(['{}={}'.format(k, parameters_grid[j][k]) for k in
parameters_grid[j].keys()])
# Performs model training and testing. Also, generates metrics for the data splits.
training_metrics, validation_metrics, test_metrics = model_training_testing(
train_skeleton_information, validation_skeleton_information, test_skeleton_information, model_names[i],
parameters_grid[j])
# Appends current modality's train, validation, and test metrics to the main dataframes.
train_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, training_metrics, train_models_parameters_metrics)
validation_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, validation_metrics, validation_models_parameters_metrics)
test_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, test_metrics, test_models_parameters_metrics)
if model_names[i] != 'gaussian_naive_bayes':
print('modality_combination={}, model={}, {} completed successfully.'.format(
combination_name, model_names[i], current_parameters_grid_name))
else:
print('modality_combination={}, model={} completed successfully.'.format(combination_name,
model_names[i]))
# Exports main training, validation and testing metrics into CSV files.
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'train_metrics',
train_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]),
'validation_metrics', validation_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'test_metrics',
test_models_parameters_metrics)
def main():
print()
n_actions = 27
n_subjects = 8
n_takes = 4
skeleton_pose_models = ['coco', 'mpi']
modalities = ['rgb', 'depth', 'inertial']
model_names = ['gaussian_naive_bayes', 'support_vector_classifier', 'decision_tree_classifier',
'random_forest_classifier', 'extra_trees_classifier', 'gradient_boosting_classifier']
modality_combinations = list_combinations_generator(modalities)
train_subject_ids = [i for i in range(1, n_subjects - 1)]
validation_subject_ids = [n_subjects - 1]
test_subject_ids = [n_subjects]
for i in range(len(modality_combinations)):
for j in range(len(skeleton_pose_models)):
per_combination_model_training_testing(train_subject_ids, validation_subject_ids, test_subject_ids,
n_actions, n_takes, modality_combinations[i],
skeleton_pose_models[j], model_names)
print()
if __name__ == '__main__':
main()
|
def data_combiner(n_actions: int,
subject_ids: list,
n_takes: int,
modalities: list,
skeleton_pose_model: str):
"""Combines skeleton point information for all actions, all takes, given list of subject ids and given list of
modalities.
Args:
n_actions: Total number of actions in the original dataset.
subject_ids: List of subjects in the current set.
n_takes: Total number of takes in the original dataset.
modalities: Current combination of modalities.
skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point
information.
Returns:
A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list
of subject ids and given list of modalities.
"""
combined_modality_skeleton_information = pd.DataFrame()
# Iterates across actions, subject_ids, takes, and modalities to combine skeleton point information.
for i in range(1, n_actions + 1):
for j in range(len(subject_ids)):
for k in range(1, n_takes + 1):
data_name = 'a{}_s{}_t{}'.format(i, subject_ids[j], k)
# Iterates across modalities to import skeleton point information file and adds it to
# combined_modality_skeleton_information. If file not found, it moves on to the next combination.
try:
# Imports 1st modality's skeleton point information for current data_name and skeleton_pose_model.
current_data_name_modality_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[0], data_name, skeleton_pose_model))
except FileNotFoundError:
continue
                # Since the number of modalities differs across combinations, when more than one modality is present
                # the skeleton point information for the remaining modalities is merged into the information imported
                # for the first modality.
if len(modalities) != 1:
for m in range(1, len(modalities)):
current_skeleton_point_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[m], data_name, skeleton_pose_model))
current_data_name_modality_information = pd.merge(current_data_name_modality_information,
current_skeleton_point_information,
on='frame', how='outer')
# Adds data_name to the imported skeleton point information.
current_data_name_modality_information['data_name'] = [data_name for _ in range(len(
current_data_name_modality_information))]
# Removes frame column from the imported skeleton point information.
current_data_name_modality_information = current_data_name_modality_information.drop(columns=['frame'])
# Adds action column to the imported skeleton point information.
current_data_name_modality_information['action'] = [i for _ in range(len(
current_data_name_modality_information))]
# Appends currently imported & modified skeleton point information to the combined modality skeleton
# point information
combined_modality_skeleton_information = combined_modality_skeleton_information.append(
current_data_name_modality_information)
return combined_modality_skeleton_information
| 56 | 121 |
# authors_name = 'Preetham Ganesh'
# project_title = 'Multi Sensor-based Human Activity Recognition using OpenCV and Sensor Fusion'
# email = '[email protected]'
import numpy as np
import os
import pandas as pd
import itertools
import logging
import sklearn.pipeline
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import ParameterGrid
logging.getLogger('sklearn').setLevel(logging.FATAL)
def list_combinations_generator(modalities: list):
"""Generates combinations for items in the given list.
Args:
modalities: List of modalities available in the dataset.
Returns:
Combinations of items in the given list.
"""
modality_combinations = list()
# Iterates across modalities to generate combinations based on length.
for length in range(1, len(modalities) + 1):
# Generate combinations for the current length.
current_length_combinations = itertools.combinations(modalities, length)
# Iterates across the generated combinations to convert it into a list.
for combination in current_length_combinations:
current_combination_list = list()
for k in combination:
current_combination_list.append(k)
modality_combinations.append(current_combination_list)
return modality_combinations
def data_combiner(n_actions: int,
subject_ids: list,
n_takes: int,
modalities: list,
skeleton_pose_model: str):
"""Combines skeleton point information for all actions, all takes, given list of subject ids and given list of
modalities.
Args:
n_actions: Total number of actions in the original dataset.
subject_ids: List of subjects in the current set.
n_takes: Total number of takes in the original dataset.
modalities: Current combination of modalities.
skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point
information.
Returns:
A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list
of subject ids and given list of modalities.
"""
combined_modality_skeleton_information = pd.DataFrame()
# Iterates across actions, subject_ids, takes, and modalities to combine skeleton point information.
for i in range(1, n_actions + 1):
for j in range(len(subject_ids)):
for k in range(1, n_takes + 1):
data_name = 'a{}_s{}_t{}'.format(i, subject_ids[j], k)
# Iterates across modalities to import skeleton point information file and adds it to
# combined_modality_skeleton_information. If file not found, it moves on to the next combination.
try:
# Imports 1st modality's skeleton point information for current data_name and skeleton_pose_model.
current_data_name_modality_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[0], data_name, skeleton_pose_model))
except FileNotFoundError:
continue
                # Since the number of modalities differs across combinations, when more than one modality is present
                # the skeleton point information for the remaining modalities is merged into the information imported
                # for the first modality.
if len(modalities) != 1:
for m in range(1, len(modalities)):
current_skeleton_point_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[m], data_name, skeleton_pose_model))
current_data_name_modality_information = pd.merge(current_data_name_modality_information,
current_skeleton_point_information,
on='frame', how='outer')
# Adds data_name to the imported skeleton point information.
current_data_name_modality_information['data_name'] = [data_name for _ in range(len(
current_data_name_modality_information))]
# Removes frame column from the imported skeleton point information.
current_data_name_modality_information = current_data_name_modality_information.drop(columns=['frame'])
# Adds action column to the imported skeleton point information.
current_data_name_modality_information['action'] = [i for _ in range(len(
current_data_name_modality_information))]
# Appends currently imported & modified skeleton point information to the combined modality skeleton
# point information
combined_modality_skeleton_information = combined_modality_skeleton_information.append(
current_data_name_modality_information)
return combined_modality_skeleton_information
def calculate_metrics(actual_values: np.ndarray,
predicted_values: np.ndarray):
"""Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,
and f1 scores.
Args:
actual_values: Actual action labels in the dataset
predicted_values: Action labels predicted by the currently trained model
Returns:
Dictionary contains keys as score names and values as scores which are floating point values.
"""
return {'accuracy_score': round(accuracy_score(actual_values, predicted_values) * 100, 3),
'balanced_accuracy_score': round(balanced_accuracy_score(actual_values, predicted_values) * 100, 3),
'precision_score': round(precision_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'recall_score': round(recall_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'f1_score': round(f1_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3)}
def retrieve_hyperparameters(current_model_name: str):
"""Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).
Args:
current_model_name: Name of the model currently expected to be trained
Returns:
A dictionary containing the hyperparameter name and the values that will be used to optimize the model
"""
# For support_vector_classifier, the hyperparameter tuned is kernel.
if current_model_name == 'support_vector_classifier':
parameters = {'kernel': ['linear', 'poly', 'rbf']}
# For decision_tree_classifier, the hyperparameters tuned are criterion, splitter, and max_depth.
elif current_model_name == 'decision_tree_classifier':
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 3, 4, 5, 6, 7]}
# For random_forest_classifier or extra_trees_classifier, the hyperparameters tuned are n_estimators, criterion, and
# max_depth
elif current_model_name == 'random_forest_classifier' or current_model_name == 'extra_trees_classifier':
parameters = {'n_estimators': [i * 10 for i in range(2, 11, 2)], 'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 6, 7]}
    # For gradient_boosting_classifier, the hyperparameters tuned are n_estimators and max_depth.
elif current_model_name == 'gradient_boosting_classifier':
parameters = {'max_depth': [2, 3, 4, 5, 6, 7], 'n_estimators': [i * 10 for i in range(2, 11, 2)]}
# For gaussian_naive_bayes, none of the hyperparameters are tuned.
else:
parameters = {'None': ['None']}
return parameters
def split_data_input_target(skeleton_data: pd.DataFrame):
"""Splits skeleton_data into input and target datasets by filtering / selecting certain columns.
Args:
skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.
Returns:
A tuple containing 2 numpy ndarrays for the input and target datasets.
"""
skeleton_data_input = skeleton_data.drop(columns=['data_name', 'action'])
skeleton_data_target = skeleton_data['action']
return np.array(skeleton_data_input), np.array(skeleton_data_target)
def video_based_model_testing(test_skeleton_information: pd.DataFrame,
current_model: sklearn):
"""Tests performance of the currently trained model on the validation or testing sets, where the performance is
evaluated per video / file, instead of evaluating per frame.
Args:
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the validation or testing sets.
current_model: Scikit-learn model that is currently being trained and tested.
Returns:
        A tuple containing the target and predicted action for each video in the validation / testing set.
"""
# Identifies unique data_names in the validation / testing set.
test_data_names = np.unique(test_skeleton_information['data_name'])
test_target_data = []
test_predicted_data = []
# Iterates across the identified unique data names
for i in range(len(test_data_names)):
# Filters skeleton point information for the current data name.
current_data_name_skeleton_information = test_skeleton_information[test_skeleton_information['data_name'] ==
test_data_names[i]]
# Splits filtered skeleton point information into input and target data.
test_skeleton_input_data, test_skeleton_target_data = split_data_input_target(
current_data_name_skeleton_information)
# Predicts labels for each frame in the filtered skeleton point information.
test_skeleton_predicted_data = list(current_model.predict(test_skeleton_input_data))
# Identifies which predicted label has highest count and appends it to the final predicted data. Also, appends
# target label to the target data.
test_target_data.append(max(current_data_name_skeleton_information['action']))
test_predicted_data.append(max(test_skeleton_predicted_data, key=test_skeleton_predicted_data.count))
return np.array(test_target_data), np.array(test_predicted_data)
def model_training_testing(train_skeleton_information: pd.DataFrame,
validation_skeleton_information: pd.DataFrame,
test_skeleton_information: pd.DataFrame,
current_model_name: str,
parameters: dict):
"""Trains and validates model for the current model name and hyperparameters on the train_skeleton_informaiton and
validation_skeleton_information.
Args:
train_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Training set.
validation_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Validation set.
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Test set.
current_model_name: Name of the model currently expected to be trained.
parameters: Current parameter values used for training and validating the model.
Returns:
A tuple which contains the training metrics, validation metrics, & test metrics.
"""
# Based on the current_model_name, the scikit-learn object is initialized using the hyperparameter (if necessary)
if current_model_name == 'support_vector_classifier':
model = SVC(kernel=parameters['kernel'])
elif current_model_name == 'decision_tree_classifier':
model = DecisionTreeClassifier(criterion=parameters['criterion'], splitter=parameters['splitter'],
max_depth=parameters['max_depth'])
elif current_model_name == 'random_forest_classifier':
model = RandomForestClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'extra_trees_classifier':
model = ExtraTreesClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'gradient_boosting_classifier':
model = GradientBoostingClassifier(n_estimators=parameters['n_estimators'], max_depth=parameters['max_depth'])
else:
model = GaussianNB()
# Splits Training skeleton information into input and target data.
train_skeleton_input_data, train_skeleton_target_data = split_data_input_target(train_skeleton_information)
# Trains the object created for the model using the training input and target.
model.fit(train_skeleton_input_data, train_skeleton_target_data)
    # Predicts video-based action labels for the training, validation, and testing skeleton information data.
train_skeleton_target_data, train_skeleton_predicted_data = video_based_model_testing(train_skeleton_information,
model)
validation_skeleton_target_data, validation_skeleton_predicted_data = video_based_model_testing(
validation_skeleton_information, model)
test_skeleton_target_data, test_skeleton_predicted_data = video_based_model_testing(test_skeleton_information, model)
    # Calculates metrics for the predicted action labels for the training, validation, and testing sets.
train_metrics = calculate_metrics(train_skeleton_target_data, train_skeleton_predicted_data)
validation_metrics = calculate_metrics(validation_skeleton_target_data, validation_skeleton_predicted_data)
test_metrics = calculate_metrics(test_skeleton_target_data, test_skeleton_predicted_data)
return train_metrics, validation_metrics, test_metrics
def per_combination_results_export(combination_name: str,
data_split: str,
metrics_dataframe: pd.DataFrame):
"""Exports the metrics_dataframe into a CSV format to the mentioned data_split folder. If the folder does not exist,
then the folder is created.
Args:
combination_name: Name of the current combination of modalities and skeleton pose model.
data_split: Name of the split the subset of the dataset belongs to.
metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.
Returns:
None.
"""
directory_path = '{}/{}'.format('../results/combination_results', combination_name)
if not os.path.isdir(directory_path):
os.mkdir(directory_path)
file_path = '{}/{}.csv'.format(directory_path, data_split)
metrics_dataframe.to_csv(file_path, index=False)
def appends_parameter_metrics_combination(current_model_name: str,
current_combination_name: str,
current_split_metrics: dict,
split_metrics_dataframe: pd.DataFrame):
"""Appends the metrics for the current model and current parameter combination to the main dataframe.
Args:
current_model_name: Name of the model currently being trained.
current_combination_name: Current combination of parameters used for training the model.
current_split_metrics: Metrics for the current parameter combination for the model.
split_metrics_dataframe: Pandas dataframe which contains metrics for the current combination of modalities.
Returns:
Updated version of the pandas dataframe which contains metrics for the current combination of modalities.
"""
current_split_metrics['model_names'] = current_model_name
current_split_metrics['parameters'] = current_combination_name
split_metrics_dataframe = split_metrics_dataframe.append(current_split_metrics, ignore_index=True)
return split_metrics_dataframe
def per_combination_model_training_testing(train_subject_ids: list,
validation_subject_ids: list,
test_subject_ids: list,
n_actions: int,
n_takes: int,
current_combination_modalities: list,
skeleton_pose_model: str,
model_names: list):
"""Combines skeleton point information based on modality combination, and subject id group. Trains, validates, and
tests the list of classifier models. Calculates metrics for each data split, model and parameter combination.
Args:
train_subject_ids: List of subject ids in the training set.
validation_subject_ids: List of subject ids in the validation set.
test_subject_ids: List of subject ids in the testing set.
n_actions: Total number of actions in the original dataset.
n_takes: Total number of takes in the original dataset.
current_combination_modalities: Current combination of modalities which will be used to import and combine
the dataset.
        skeleton_pose_model: Name of the pose model currently used for extracting skeleton point information.
        model_names: List of ML classifier model names which will be used for creating the model objects.
Returns:
None.
"""
# Combines skeleton point information based on modality combination, and subject id group.
train_skeleton_information = data_combiner(n_actions, train_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
validation_skeleton_information = data_combiner(n_actions, validation_subject_ids, n_takes,
current_combination_modalities, skeleton_pose_model)
test_skeleton_information = data_combiner(n_actions, test_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
# Creating empty dataframes for the metrics for current modality combination's training, validation, and testing
# datasets.
metrics_features = ['accuracy_score', 'balanced_accuracy_score', 'precision_score', 'recall_score', 'f1_score']
train_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
validation_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
test_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
combination_name = '_'.join(current_combination_modalities + [skeleton_pose_model])
# Iterates across model names and parameter grid for training and testing the classification models.
for i in range(len(model_names)):
# Retrieves parameters and generates parameter combinations.
parameters = retrieve_hyperparameters(model_names[i])
parameters_grid = ParameterGrid(parameters)
for j in range(len(parameters_grid)):
current_parameters_grid_name = ', '.join(['{}={}'.format(k, parameters_grid[j][k]) for k in
parameters_grid[j].keys()])
# Performs model training and testing. Also, generates metrics for the data splits.
training_metrics, validation_metrics, test_metrics = model_training_testing(
train_skeleton_information, validation_skeleton_information, test_skeleton_information, model_names[i],
parameters_grid[j])
# Appends current modality's train, validation, and test metrics to the main dataframes.
train_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, training_metrics, train_models_parameters_metrics)
validation_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, validation_metrics, validation_models_parameters_metrics)
test_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, test_metrics, test_models_parameters_metrics)
if model_names[i] != 'gaussian_naive_bayes':
print('modality_combination={}, model={}, {} completed successfully.'.format(
combination_name, model_names[i], current_parameters_grid_name))
else:
print('modality_combination={}, model={} completed successfully.'.format(combination_name,
model_names[i]))
# Exports main training, validation and testing metrics into CSV files.
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'train_metrics',
train_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]),
'validation_metrics', validation_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'test_metrics',
test_models_parameters_metrics)
def main():
print()
n_actions = 27
n_subjects = 8
n_takes = 4
skeleton_pose_models = ['coco', 'mpi']
modalities = ['rgb', 'depth', 'inertial']
model_names = ['gaussian_naive_bayes', 'support_vector_classifier', 'decision_tree_classifier',
'random_forest_classifier', 'extra_trees_classifier', 'gradient_boosting_classifier']
modality_combinations = list_combinations_generator(modalities)
train_subject_ids = [i for i in range(1, n_subjects - 1)]
validation_subject_ids = [n_subjects - 1]
test_subject_ids = [n_subjects]
for i in range(len(modality_combinations)):
for j in range(len(skeleton_pose_models)):
per_combination_model_training_testing(train_subject_ids, validation_subject_ids, test_subject_ids,
n_actions, n_takes, modality_combinations[i],
skeleton_pose_models[j], model_names)
print()
if __name__ == '__main__':
main()
|
retrieve_hyperparameters
|
Based on the current_model_name returns a dictionary of hyperparameters used for optimizing the model (if necessary).
Args:
current_model_name: Name of the model currently expected to be trained
Returns:
A dictionary containing the hyperparameter name and the values that will be used to optimize the model
|
# authors_name = 'Preetham Ganesh'
# project_title = 'Multi Sensor-based Human Activity Recognition using OpenCV and Sensor Fusion'
# email = '[email protected]'
import numpy as np
import os
import pandas as pd
import itertools
import logging
import sklearn.pipeline
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import ParameterGrid
logging.getLogger('sklearn').setLevel(logging.FATAL)
def list_combinations_generator(modalities: list):
"""Generates combinations for items in the given list.
Args:
modalities: List of modalities available in the dataset.
Returns:
Combinations of items in the given list.
"""
modality_combinations = list()
# Iterates across modalities to generate combinations based on length.
for length in range(1, len(modalities) + 1):
# Generate combinations for the current length.
current_length_combinations = itertools.combinations(modalities, length)
# Iterates across the generated combinations to convert it into a list.
for combination in current_length_combinations:
current_combination_list = list()
for k in combination:
current_combination_list.append(k)
modality_combinations.append(current_combination_list)
return modality_combinations
def data_combiner(n_actions: int,
subject_ids: list,
n_takes: int,
modalities: list,
skeleton_pose_model: str):
"""Combines skeleton point information for all actions, all takes, given list of subject ids and given list of
modalities.
Args:
n_actions: Total number of actions in the original dataset.
subject_ids: List of subjects in the current set.
n_takes: Total number of takes in the original dataset.
modalities: Current combination of modalities.
skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point
information.
Returns:
A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list
of subject ids and given list of modalities.
"""
combined_modality_skeleton_information = pd.DataFrame()
# Iterates across actions, subject_ids, takes, and modalities to combine skeleton point information.
for i in range(1, n_actions + 1):
for j in range(len(subject_ids)):
for k in range(1, n_takes + 1):
data_name = 'a{}_s{}_t{}'.format(i, subject_ids[j], k)
# Iterates across modalities to import skeleton point information file and adds it to
# combined_modality_skeleton_information. If file not found, it moves on to the next combination.
try:
# Imports 1st modality's skeleton point information for current data_name and skeleton_pose_model.
current_data_name_modality_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[0], data_name, skeleton_pose_model))
except FileNotFoundError:
continue
# Since the number of modalities differs across combinations, when more than one modality is present,
# the imported skeleton point information for the remaining modalities is merged into the skeleton
# point information of the first modality.
if len(modalities) != 1:
for m in range(1, len(modalities)):
current_skeleton_point_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[m], data_name, skeleton_pose_model))
current_data_name_modality_information = pd.merge(current_data_name_modality_information,
current_skeleton_point_information,
on='frame', how='outer')
# Adds data_name to the imported skeleton point information.
current_data_name_modality_information['data_name'] = [data_name for _ in range(len(
current_data_name_modality_information))]
# Removes frame column from the imported skeleton point information.
current_data_name_modality_information = current_data_name_modality_information.drop(columns=['frame'])
# Adds action column to the imported skeleton point information.
current_data_name_modality_information['action'] = [i for _ in range(len(
current_data_name_modality_information))]
# Appends currently imported & modified skeleton point information to the combined modality skeleton
# point information
combined_modality_skeleton_information = combined_modality_skeleton_information.append(
current_data_name_modality_information)
return combined_modality_skeleton_information
def calculate_metrics(actual_values: np.ndarray,
predicted_values: np.ndarray):
"""Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,
and f1 scores.
Args:
actual_values: Actual action labels in the dataset
predicted_values: Action labels predicted by the currently trained model
Returns:
A dictionary with score names as keys and the corresponding floating-point scores as values.
"""
return {'accuracy_score': round(accuracy_score(actual_values, predicted_values) * 100, 3),
'balanced_accuracy_score': round(balanced_accuracy_score(actual_values, predicted_values) * 100, 3),
'precision_score': round(precision_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'recall_score': round(recall_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'f1_score': round(f1_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3)}
# MASKED: retrieve_hyperparameters function (lines 146-177)
def split_data_input_target(skeleton_data: pd.DataFrame):
"""Splits skeleton_data into input and target datasets by filtering / selecting certain columns.
Args:
skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.
Returns:
A tuple containing 2 numpy ndarrays for the input and target datasets.
"""
skeleton_data_input = skeleton_data.drop(columns=['data_name', 'action'])
skeleton_data_target = skeleton_data['action']
return np.array(skeleton_data_input), np.array(skeleton_data_target)
def video_based_model_testing(test_skeleton_information: pd.DataFrame,
current_model: sklearn):
"""Tests performance of the currently trained model on the validation or testing sets, where the performance is
evaluated per video / file, instead of evaluating per frame.
Args:
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the validation or testing sets.
current_model: Scikit-learn model that is currently being trained and tested.
Returns:
A tuple containing the target and predicted actions for each video in the validation / testing set.
"""
# Identifies unique data_names in the validation / testing set.
test_data_names = np.unique(test_skeleton_information['data_name'])
test_target_data = []
test_predicted_data = []
# Iterates across the identified unique data names
for i in range(len(test_data_names)):
# Filters skeleton point information for the current data name.
current_data_name_skeleton_information = test_skeleton_information[test_skeleton_information['data_name'] ==
test_data_names[i]]
# Splits filtered skeleton point information into input and target data.
test_skeleton_input_data, test_skeleton_target_data = split_data_input_target(
current_data_name_skeleton_information)
# Predicts labels for each frame in the filtered skeleton point information.
test_skeleton_predicted_data = list(current_model.predict(test_skeleton_input_data))
# Identifies which predicted label has highest count and appends it to the final predicted data. Also, appends
# target label to the target data.
test_target_data.append(max(current_data_name_skeleton_information['action']))
test_predicted_data.append(max(test_skeleton_predicted_data, key=test_skeleton_predicted_data.count))
return np.array(test_target_data), np.array(test_predicted_data)
def model_training_testing(train_skeleton_information: pd.DataFrame,
validation_skeleton_information: pd.DataFrame,
test_skeleton_information: pd.DataFrame,
current_model_name: str,
parameters: dict):
"""Trains and validates model for the current model name and hyperparameters on the train_skeleton_informaiton and
validation_skeleton_information.
Args:
train_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Training set.
validation_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Validation set.
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Test set.
current_model_name: Name of the model currently expected to be trained.
parameters: Current parameter values used for training and validating the model.
Returns:
A tuple which contains the training metrics, validation metrics, & test metrics.
"""
# Based on the current_model_name, the scikit-learn object is initialized using the hyperparameter (if necessary)
if current_model_name == 'support_vector_classifier':
model = SVC(kernel=parameters['kernel'])
elif current_model_name == 'decision_tree_classifier':
model = DecisionTreeClassifier(criterion=parameters['criterion'], splitter=parameters['splitter'],
max_depth=parameters['max_depth'])
elif current_model_name == 'random_forest_classifier':
model = RandomForestClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'extra_trees_classifier':
model = ExtraTreesClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'gradient_boosting_classifier':
model = GradientBoostingClassifier(n_estimators=parameters['n_estimators'], max_depth=parameters['max_depth'])
else:
model = GaussianNB()
# Splits Training skeleton information into input and target data.
train_skeleton_input_data, train_skeleton_target_data = split_data_input_target(train_skeleton_information)
# Trains the object created for the model using the training input and target.
model.fit(train_skeleton_input_data, train_skeleton_target_data)
# Predicts video-based action labels for the training, validation, and testing skeleton information data.
train_skeleton_target_data, train_skeleton_predicted_data = video_based_model_testing(train_skeleton_information,
model)
validation_skeleton_target_data, validation_skeleton_predicted_data = video_based_model_testing(
validation_skeleton_information, model)
test_skeleton_target_data, test_skeleton_predicted_data = video_based_model_testing(test_skeleton_information, model)
# Calculates metrics for the predicted action labels for the training, validation, and testing sets.
train_metrics = calculate_metrics(train_skeleton_target_data, train_skeleton_predicted_data)
validation_metrics = calculate_metrics(validation_skeleton_target_data, validation_skeleton_predicted_data)
test_metrics = calculate_metrics(test_skeleton_target_data, test_skeleton_predicted_data)
return train_metrics, validation_metrics, test_metrics
def per_combination_results_export(combination_name: str,
data_split: str,
metrics_dataframe: pd.DataFrame):
"""Exports the metrics_dataframe into a CSV format to the mentioned data_split folder. If the folder does not exist,
then the folder is created.
Args:
combination_name: Name of the current combination of modalities and skeleton pose model.
data_split: Name of the split the subset of the dataset belongs to.
metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.
Returns:
None.
"""
directory_path = '{}/{}'.format('../results/combination_results', combination_name)
if not os.path.isdir(directory_path):
os.mkdir(directory_path)
file_path = '{}/{}.csv'.format(directory_path, data_split)
metrics_dataframe.to_csv(file_path, index=False)
def appends_parameter_metrics_combination(current_model_name: str,
current_combination_name: str,
current_split_metrics: dict,
split_metrics_dataframe: pd.DataFrame):
"""Appends the metrics for the current model and current parameter combination to the main dataframe.
Args:
current_model_name: Name of the model currently being trained.
current_combination_name: Current combination of parameters used for training the model.
current_split_metrics: Metrics for the current parameter combination for the model.
split_metrics_dataframe: Pandas dataframe which contains metrics for the current combination of modalities.
Returns:
Updated version of the pandas dataframe which contains metrics for the current combination of modalities.
"""
current_split_metrics['model_names'] = current_model_name
current_split_metrics['parameters'] = current_combination_name
split_metrics_dataframe = split_metrics_dataframe.append(current_split_metrics, ignore_index=True)
return split_metrics_dataframe
def per_combination_model_training_testing(train_subject_ids: list,
validation_subject_ids: list,
test_subject_ids: list,
n_actions: int,
n_takes: int,
current_combination_modalities: list,
skeleton_pose_model: str,
model_names: list):
"""Combines skeleton point information based on modality combination, and subject id group. Trains, validates, and
tests the list of classifier models. Calculates metrics for each data split, model and parameter combination.
Args:
train_subject_ids: List of subject ids in the training set.
validation_subject_ids: List of subject ids in the validation set.
test_subject_ids: List of subject ids in the testing set.
n_actions: Total number of actions in the original dataset.
n_takes: Total number of takes in the original dataset.
current_combination_modalities: Current combination of modalities which will be used to import and combine
the dataset.
skeleton_pose_model: Name of the pose model currently used for extracting skeleton points.
model_names: List of ML classifier model names which will be used for creating the model objects.
Returns:
None.
"""
# Combines skeleton point information based on modality combination, and subject id group.
train_skeleton_information = data_combiner(n_actions, train_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
validation_skeleton_information = data_combiner(n_actions, validation_subject_ids, n_takes,
current_combination_modalities, skeleton_pose_model)
test_skeleton_information = data_combiner(n_actions, test_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
# Creating empty dataframes for the metrics for current modality combination's training, validation, and testing
# datasets.
metrics_features = ['accuracy_score', 'balanced_accuracy_score', 'precision_score', 'recall_score', 'f1_score']
train_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
validation_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
test_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
combination_name = '_'.join(current_combination_modalities + [skeleton_pose_model])
# Iterates across model names and parameter grid for training and testing the classification models.
for i in range(len(model_names)):
# Retrieves parameters and generates parameter combinations.
parameters = retrieve_hyperparameters(model_names[i])
parameters_grid = ParameterGrid(parameters)
for j in range(len(parameters_grid)):
current_parameters_grid_name = ', '.join(['{}={}'.format(k, parameters_grid[j][k]) for k in
parameters_grid[j].keys()])
# Performs model training and testing. Also, generates metrics for the data splits.
training_metrics, validation_metrics, test_metrics = model_training_testing(
train_skeleton_information, validation_skeleton_information, test_skeleton_information, model_names[i],
parameters_grid[j])
# Appends current modality's train, validation, and test metrics to the main dataframes.
train_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, training_metrics, train_models_parameters_metrics)
validation_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, validation_metrics, validation_models_parameters_metrics)
test_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, test_metrics, test_models_parameters_metrics)
if model_names[i] != 'gaussian_naive_bayes':
print('modality_combination={}, model={}, {} completed successfully.'.format(
combination_name, model_names[i], current_parameters_grid_name))
else:
print('modality_combination={}, model={} completed successfully.'.format(combination_name,
model_names[i]))
# Exports main training, validation and testing metrics into CSV files.
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'train_metrics',
train_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]),
'validation_metrics', validation_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'test_metrics',
test_models_parameters_metrics)
def main():
print()
n_actions = 27
n_subjects = 8
n_takes = 4
skeleton_pose_models = ['coco', 'mpi']
modalities = ['rgb', 'depth', 'inertial']
model_names = ['gaussian_naive_bayes', 'support_vector_classifier', 'decision_tree_classifier',
'random_forest_classifier', 'extra_trees_classifier', 'gradient_boosting_classifier']
modality_combinations = list_combinations_generator(modalities)
train_subject_ids = [i for i in range(1, n_subjects - 1)]
validation_subject_ids = [n_subjects - 1]
test_subject_ids = [n_subjects]
for i in range(len(modality_combinations)):
for j in range(len(skeleton_pose_models)):
per_combination_model_training_testing(train_subject_ids, validation_subject_ids, test_subject_ids,
n_actions, n_takes, modality_combinations[i],
skeleton_pose_models[j], model_names)
print()
if __name__ == '__main__':
main()
|
def retrieve_hyperparameters(current_model_name: str):
"""Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).
Args:
current_model_name: Name of the model currently expected to be trained
Returns:
A dictionary containing the hyperparameter names and the values that will be used to optimize the model.
"""
# For support_vector_classifier, the hyperparameter tuned is kernel.
if current_model_name == 'support_vector_classifier':
parameters = {'kernel': ['linear', 'poly', 'rbf']}
# For decision_tree_classifier, the hyperparameters tuned are criterion, splitter, and max_depth.
elif current_model_name == 'decision_tree_classifier':
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 3, 4, 5, 6, 7]}
# For random_forest_classifier or extra_trees_classifier, the hyperparameters tuned are n_estimators, criterion, and
# max_depth
elif current_model_name == 'random_forest_classifier' or current_model_name == 'extra_trees_classifier':
parameters = {'n_estimators': [i * 10 for i in range(2, 11, 2)], 'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 6, 7]}
# For gradient_boosting_classifier, the hyperparameters tuned are n_estimators and max_depth.
elif current_model_name == 'gradient_boosting_classifier':
parameters = {'max_depth': [2, 3, 4, 5, 6, 7], 'n_estimators': [i * 10 for i in range(2, 11, 2)]}
# For gaussian_naive_bayes, none of the hyperparameters are tuned.
else:
parameters = {'None': ['None']}
return parameters
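A minimal usage sketch (illustrative only; the model name below is just an example), mirroring how the training loop above expands the returned dictionary with scikit-learn's ParameterGrid:
from sklearn.model_selection import ParameterGrid
# Retrieves the hyperparameter grid for one of the supported model names.
parameters = retrieve_hyperparameters('decision_tree_classifier')
# ParameterGrid expands the dictionary into individual parameter combinations,
# e.g. {'criterion': 'gini', 'max_depth': 2, 'splitter': 'best'}.
for parameter_combination in ParameterGrid(parameters):
    print(parameter_combination)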
| 146 | 177 |
# authors_name = 'Preetham Ganesh'
# project_title = 'Multi Sensor-based Human Activity Recognition using OpenCV and Sensor Fusion'
# email = '[email protected]'
import numpy as np
import os
import pandas as pd
import itertools
import logging
import sklearn.pipeline
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import ParameterGrid
logging.getLogger('sklearn').setLevel(logging.FATAL)
def list_combinations_generator(modalities: list):
"""Generates combinations for items in the given list.
Args:
modalities: List of modalities available in the dataset.
Returns:
Combinations of items in the given list.
"""
modality_combinations = list()
# Iterates across modalities to generate combinations based on length.
for length in range(1, len(modalities) + 1):
# Generate combinations for the current length.
current_length_combinations = itertools.combinations(modalities, length)
# Iterates across the generated combinations to convert it into a list.
for combination in current_length_combinations:
current_combination_list = list()
for k in combination:
current_combination_list.append(k)
modality_combinations.append(current_combination_list)
return modality_combinations
def data_combiner(n_actions: int,
subject_ids: list,
n_takes: int,
modalities: list,
skeleton_pose_model: str):
"""Combines skeleton point information for all actions, all takes, given list of subject ids and given list of
modalities.
Args:
n_actions: Total number of actions in the original dataset.
subject_ids: List of subjects in the current set.
n_takes: Total number of takes in the original dataset.
modalities: Current combination of modalities.
skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point
information.
Returns:
A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list
of subject ids and given list of modalities.
"""
combined_modality_skeleton_information = pd.DataFrame()
# Iterates across actions, subject_ids, takes, and modalities to combine skeleton point information.
for i in range(1, n_actions + 1):
for j in range(len(subject_ids)):
for k in range(1, n_takes + 1):
data_name = 'a{}_s{}_t{}'.format(i, subject_ids[j], k)
# Iterates across modalities to import skeleton point information file and adds it to
# combined_modality_skeleton_information. If file not found, it moves on to the next combination.
try:
# Imports 1st modality's skeleton point information for current data_name and skeleton_pose_model.
current_data_name_modality_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[0], data_name, skeleton_pose_model))
except FileNotFoundError:
continue
# Since the number of modalities differs across combinations, when more than one modality is present,
# the imported skeleton point information for the remaining modalities is merged into the skeleton
# point information of the first modality.
if len(modalities) != 1:
for m in range(1, len(modalities)):
current_skeleton_point_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[m], data_name, skeleton_pose_model))
current_data_name_modality_information = pd.merge(current_data_name_modality_information,
current_skeleton_point_information,
on='frame', how='outer')
# Adds data_name to the imported skeleton point information.
current_data_name_modality_information['data_name'] = [data_name for _ in range(len(
current_data_name_modality_information))]
# Removes frame column from the imported skeleton point information.
current_data_name_modality_information = current_data_name_modality_information.drop(columns=['frame'])
# Adds action column to the imported skeleton point information.
current_data_name_modality_information['action'] = [i for _ in range(len(
current_data_name_modality_information))]
# Appends currently imported & modified skeleton point information to the combined modality skeleton
# point information
combined_modality_skeleton_information = combined_modality_skeleton_information.append(
current_data_name_modality_information)
return combined_modality_skeleton_information
def calculate_metrics(actual_values: np.ndarray,
predicted_values: np.ndarray):
"""Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,
and f1 scores.
Args:
actual_values: Actual action labels in the dataset
predicted_values: Action labels predicted by the currently trained model
Returns:
A dictionary with score names as keys and the corresponding floating-point scores as values.
"""
return {'accuracy_score': round(accuracy_score(actual_values, predicted_values) * 100, 3),
'balanced_accuracy_score': round(balanced_accuracy_score(actual_values, predicted_values) * 100, 3),
'precision_score': round(precision_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'recall_score': round(recall_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'f1_score': round(f1_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3)}
def retrieve_hyperparameters(current_model_name: str):
"""Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).
Args:
current_model_name: Name of the model currently expected to be trained
Returns:
A dictionary containing the hyperparameter names and the values that will be used to optimize the model.
"""
# For support_vector_classifier, the hyperparameter tuned is kernel.
if current_model_name == 'support_vector_classifier':
parameters = {'kernel': ['linear', 'poly', 'rbf']}
# For decision_tree_classifier, the hyperparameters tuned are criterion, splitter, and max_depth.
elif current_model_name == 'decision_tree_classifier':
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 3, 4, 5, 6, 7]}
# For random_forest_classifier or extra_trees_classifier, the hyperparameters tuned are n_estimators, criterion, and
# max_depth
elif current_model_name == 'random_forest_classifier' or current_model_name == 'extra_trees_classifier':
parameters = {'n_estimators': [i * 10 for i in range(2, 11, 2)], 'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 6, 7]}
# For gradient_boosting_classifier, the hyperparameters tuned are n_estimators and max_depth.
elif current_model_name == 'gradient_boosting_classifier':
parameters = {'max_depth': [2, 3, 4, 5, 6, 7], 'n_estimators': [i * 10 for i in range(2, 11, 2)]}
# For gaussian_naive_bayes, none of the hyperparameters are tuned.
else:
parameters = {'None': ['None']}
return parameters
def split_data_input_target(skeleton_data: pd.DataFrame):
"""Splits skeleton_data into input and target datasets by filtering / selecting certain columns.
Args:
skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.
Returns:
A tuple containing 2 numpy ndarrays for the input and target datasets.
"""
skeleton_data_input = skeleton_data.drop(columns=['data_name', 'action'])
skeleton_data_target = skeleton_data['action']
return np.array(skeleton_data_input), np.array(skeleton_data_target)
def video_based_model_testing(test_skeleton_information: pd.DataFrame,
current_model: sklearn):
"""Tests performance of the currently trained model on the validation or testing sets, where the performance is
evaluated per video / file, instead of evaluating per frame.
Args:
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the validation or testing sets.
current_model: Scikit-learn model that is currently being trained and tested.
Returns:
A tuple containing the target and predicted actions for each video in the validation / testing set.
"""
# Identifies unique data_names in the validation / testing set.
test_data_names = np.unique(test_skeleton_information['data_name'])
test_target_data = []
test_predicted_data = []
# Iterates across the identified unique data names
for i in range(len(test_data_names)):
# Filters skeleton point information for the current data name.
current_data_name_skeleton_information = test_skeleton_information[test_skeleton_information['data_name'] ==
test_data_names[i]]
# Splits filtered skeleton point information into input and target data.
test_skeleton_input_data, test_skeleton_target_data = split_data_input_target(
current_data_name_skeleton_information)
# Predicts labels for each frame in the filtered skeleton point information.
test_skeleton_predicted_data = list(current_model.predict(test_skeleton_input_data))
# Identifies which predicted label has highest count and appends it to the final predicted data. Also, appends
# target label to the target data.
test_target_data.append(max(current_data_name_skeleton_information['action']))
test_predicted_data.append(max(test_skeleton_predicted_data, key=test_skeleton_predicted_data.count))
return np.array(test_target_data), np.array(test_predicted_data)
def model_training_testing(train_skeleton_information: pd.DataFrame,
validation_skeleton_information: pd.DataFrame,
test_skeleton_information: pd.DataFrame,
current_model_name: str,
parameters: dict):
"""Trains and validates model for the current model name and hyperparameters on the train_skeleton_informaiton and
validation_skeleton_information.
Args:
train_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Training set.
validation_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Validation set.
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Test set.
current_model_name: Name of the model currently expected to be trained.
parameters: Current parameter values used for training and validating the model.
Returns:
A tuple which contains the training metrics, validation metrics, & test metrics.
"""
# Based on the current_model_name, the scikit-learn object is initialized using the hyperparameter (if necessary)
if current_model_name == 'support_vector_classifier':
model = SVC(kernel=parameters['kernel'])
elif current_model_name == 'decision_tree_classifier':
model = DecisionTreeClassifier(criterion=parameters['criterion'], splitter=parameters['splitter'],
max_depth=parameters['max_depth'])
elif current_model_name == 'random_forest_classifier':
model = RandomForestClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'extra_trees_classifier':
model = ExtraTreesClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'gradient_boosting_classifier':
model = GradientBoostingClassifier(n_estimators=parameters['n_estimators'], max_depth=parameters['max_depth'])
else:
model = GaussianNB()
# Splits Training skeleton information into input and target data.
train_skeleton_input_data, train_skeleton_target_data = split_data_input_target(train_skeleton_information)
# Trains the object created for the model using the training input and target.
model.fit(train_skeleton_input_data, train_skeleton_target_data)
# Predicts video-based action labels for the training, validation, and testing skeleton information data.
train_skeleton_target_data, train_skeleton_predicted_data = video_based_model_testing(train_skeleton_information,
model)
validation_skeleton_target_data, validation_skeleton_predicted_data = video_based_model_testing(
validation_skeleton_information, model)
test_skeleton_target_data, test_skeleton_predicted_data = video_based_model_testing(test_skeleton_information, model)
# Calculates metrics for the predicted action labels for the training, validation, and testing sets.
train_metrics = calculate_metrics(train_skeleton_target_data, train_skeleton_predicted_data)
validation_metrics = calculate_metrics(validation_skeleton_target_data, validation_skeleton_predicted_data)
test_metrics = calculate_metrics(test_skeleton_target_data, test_skeleton_predicted_data)
return train_metrics, validation_metrics, test_metrics
def per_combination_results_export(combination_name: str,
data_split: str,
metrics_dataframe: pd.DataFrame):
"""Exports the metrics_dataframe into a CSV format to the mentioned data_split folder. If the folder does not exist,
then the folder is created.
Args:
combination_name: Name of the current combination of modalities and skeleton pose model.
data_split: Name of the split the subset of the dataset belongs to.
metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.
Returns:
None.
"""
directory_path = '{}/{}'.format('../results/combination_results', combination_name)
if not os.path.isdir(directory_path):
os.mkdir(directory_path)
file_path = '{}/{}.csv'.format(directory_path, data_split)
metrics_dataframe.to_csv(file_path, index=False)
def appends_parameter_metrics_combination(current_model_name: str,
current_combination_name: str,
current_split_metrics: dict,
split_metrics_dataframe: pd.DataFrame):
"""Appends the metrics for the current model and current parameter combination to the main dataframe.
Args:
current_model_name: Name of the model currently being trained.
current_combination_name: Current combination of parameters used for training the model.
current_split_metrics: Metrics for the current parameter combination for the model.
split_metrics_dataframe: Pandas dataframe which contains metrics for the current combination of modalities.
Returns:
Updated version of the pandas dataframe which contains metrics for the current combination of modalities.
"""
current_split_metrics['model_names'] = current_model_name
current_split_metrics['parameters'] = current_combination_name
split_metrics_dataframe = split_metrics_dataframe.append(current_split_metrics, ignore_index=True)
return split_metrics_dataframe
def per_combination_model_training_testing(train_subject_ids: list,
validation_subject_ids: list,
test_subject_ids: list,
n_actions: int,
n_takes: int,
current_combination_modalities: list,
skeleton_pose_model: str,
model_names: list):
"""Combines skeleton point information based on modality combination, and subject id group. Trains, validates, and
tests the list of classifier models. Calculates metrics for each data split, model and parameter combination.
Args:
train_subject_ids: List of subject ids in the training set.
validation_subject_ids: List of subject ids in the validation set.
test_subject_ids: List of subject ids in the testing set.
n_actions: Total number of actions in the original dataset.
n_takes: Total number of takes in the original dataset.
current_combination_modalities: Current combination of modalities which will be used to import and combine
the dataset.
skeleton_pose_model: Name of the pose model currently used for extracting skeleton points.
model_names: List of ML classifier model names which will be used for creating the model objects.
Returns:
None.
"""
# Combines skeleton point information based on modality combination, and subject id group.
train_skeleton_information = data_combiner(n_actions, train_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
validation_skeleton_information = data_combiner(n_actions, validation_subject_ids, n_takes,
current_combination_modalities, skeleton_pose_model)
test_skeleton_information = data_combiner(n_actions, test_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
# Creating empty dataframes for the metrics for current modality combination's training, validation, and testing
# datasets.
metrics_features = ['accuracy_score', 'balanced_accuracy_score', 'precision_score', 'recall_score', 'f1_score']
train_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
validation_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
test_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
combination_name = '_'.join(current_combination_modalities + [skeleton_pose_model])
# Iterates across model names and parameter grid for training and testing the classification models.
for i in range(len(model_names)):
# Retrieves parameters and generates parameter combinations.
parameters = retrieve_hyperparameters(model_names[i])
parameters_grid = ParameterGrid(parameters)
for j in range(len(parameters_grid)):
current_parameters_grid_name = ', '.join(['{}={}'.format(k, parameters_grid[j][k]) for k in
parameters_grid[j].keys()])
# Performs model training and testing. Also, generates metrics for the data splits.
training_metrics, validation_metrics, test_metrics = model_training_testing(
train_skeleton_information, validation_skeleton_information, test_skeleton_information, model_names[i],
parameters_grid[j])
# Appends current modality's train, validation, and test metrics to the main dataframes.
train_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, training_metrics, train_models_parameters_metrics)
validation_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, validation_metrics, validation_models_parameters_metrics)
test_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, test_metrics, test_models_parameters_metrics)
if model_names[i] != 'gaussian_naive_bayes':
print('modality_combination={}, model={}, {} completed successfully.'.format(
combination_name, model_names[i], current_parameters_grid_name))
else:
print('modality_combination={}, model={} completed successfully.'.format(combination_name,
model_names[i]))
# Exports main training, validation and testing metrics into CSV files.
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'train_metrics',
train_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]),
'validation_metrics', validation_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'test_metrics',
test_models_parameters_metrics)
def main():
print()
n_actions = 27
n_subjects = 8
n_takes = 4
skeleton_pose_models = ['coco', 'mpi']
modalities = ['rgb', 'depth', 'inertial']
model_names = ['gaussian_naive_bayes', 'support_vector_classifier', 'decision_tree_classifier',
'random_forest_classifier', 'extra_trees_classifier', 'gradient_boosting_classifier']
modality_combinations = list_combinations_generator(modalities)
train_subject_ids = [i for i in range(1, n_subjects - 1)]
validation_subject_ids = [n_subjects - 1]
test_subject_ids = [n_subjects]
for i in range(len(modality_combinations)):
for j in range(len(skeleton_pose_models)):
per_combination_model_training_testing(train_subject_ids, validation_subject_ids, test_subject_ids,
n_actions, n_takes, modality_combinations[i],
skeleton_pose_models[j], model_names)
print()
if __name__ == '__main__':
main()
|
per_combination_results_export
|
Exports the metrics_dataframe as a CSV file named after data_split, into the folder for the given combination_name. If the folder does not exist,
then the folder is created.
Args:
combination_name: Name of the current combination of modalities and skeleton pose model.
data_split: Name of the split the subset of the dataset belongs to.
metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.
Returns:
None.
|
# authors_name = 'Preetham Ganesh'
# project_title = 'Multi Sensor-based Human Activity Recognition using OpenCV and Sensor Fusion'
# email = '[email protected]'
import numpy as np
import os
import pandas as pd
import itertools
import logging
import sklearn.pipeline
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import ParameterGrid
logging.getLogger('sklearn').setLevel(logging.FATAL)
def list_combinations_generator(modalities: list):
"""Generates combinations for items in the given list.
Args:
modalities: List of modalities available in the dataset.
Returns:
Combinations of items in the given list.
"""
modality_combinations = list()
# Iterates across modalities to generate combinations based on length.
for length in range(1, len(modalities) + 1):
# Generate combinations for the current length.
current_length_combinations = itertools.combinations(modalities, length)
# Iterates across the generated combinations to convert it into a list.
for combination in current_length_combinations:
current_combination_list = list()
for k in combination:
current_combination_list.append(k)
modality_combinations.append(current_combination_list)
return modality_combinations
def data_combiner(n_actions: int,
subject_ids: list,
n_takes: int,
modalities: list,
skeleton_pose_model: str):
"""Combines skeleton point information for all actions, all takes, given list of subject ids and given list of
modalities.
Args:
n_actions: Total number of actions in the original dataset.
subject_ids: List of subjects in the current set.
n_takes: Total number of takes in the original dataset.
modalities: Current combination of modalities.
skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point
information.
Returns:
A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list
of subject ids and given list of modalities.
"""
combined_modality_skeleton_information = pd.DataFrame()
# Iterates across actions, subject_ids, takes, and modalities to combine skeleton point information.
for i in range(1, n_actions + 1):
for j in range(len(subject_ids)):
for k in range(1, n_takes + 1):
data_name = 'a{}_s{}_t{}'.format(i, subject_ids[j], k)
# Iterates across modalities to import skeleton point information file and adds it to
# combined_modality_skeleton_information. If file not found, it moves on to the next combination.
try:
# Imports 1st modality's skeleton point information for current data_name and skeleton_pose_model.
current_data_name_modality_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[0], data_name, skeleton_pose_model))
except FileNotFoundError:
continue
# Since the number of modalities differs across combinations, when more than one modality is present,
# the imported skeleton point information for the remaining modalities is merged into the skeleton
# point information of the first modality.
if len(modalities) != 1:
for m in range(1, len(modalities)):
current_skeleton_point_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[m], data_name, skeleton_pose_model))
current_data_name_modality_information = pd.merge(current_data_name_modality_information,
current_skeleton_point_information,
on='frame', how='outer')
# Adds data_name to the imported skeleton point information.
current_data_name_modality_information['data_name'] = [data_name for _ in range(len(
current_data_name_modality_information))]
# Removes frame column from the imported skeleton point information.
current_data_name_modality_information = current_data_name_modality_information.drop(columns=['frame'])
# Adds action column to the imported skeleton point information.
current_data_name_modality_information['action'] = [i for _ in range(len(
current_data_name_modality_information))]
# Appends currently imported & modified skeleton point information to the combined modality skeleton
# point information
combined_modality_skeleton_information = combined_modality_skeleton_information.append(
current_data_name_modality_information)
return combined_modality_skeleton_information
def calculate_metrics(actual_values: np.ndarray,
predicted_values: np.ndarray):
"""Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,
and f1 scores.
Args:
actual_values: Actual action labels in the dataset
predicted_values: Action labels predicted by the currently trained model
Returns:
A dictionary with score names as keys and the corresponding floating-point scores as values.
"""
return {'accuracy_score': round(accuracy_score(actual_values, predicted_values) * 100, 3),
'balanced_accuracy_score': round(balanced_accuracy_score(actual_values, predicted_values) * 100, 3),
'precision_score': round(precision_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'recall_score': round(recall_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'f1_score': round(f1_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3)}
def retrieve_hyperparameters(current_model_name: str):
"""Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).
Args:
current_model_name: Name of the model currently expected to be trained
Returns:
A dictionary containing the hyperparameter names and the values that will be used to optimize the model.
"""
# For support_vector_classifier, the hyperparameter tuned is kernel.
if current_model_name == 'support_vector_classifier':
parameters = {'kernel': ['linear', 'poly', 'rbf']}
# For decision_tree_classifier, the hyperparameters tuned are criterion, splitter, and max_depth.
elif current_model_name == 'decision_tree_classifier':
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 3, 4, 5, 6, 7]}
# For random_forest_classifier or extra_trees_classifier, the hyperparameters tuned are n_estimators, criterion, and
# max_depth
elif current_model_name == 'random_forest_classifier' or current_model_name == 'extra_trees_classifier':
parameters = {'n_estimators': [i * 10 for i in range(2, 11, 2)], 'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 6, 7]}
# For gradient_boosting_classifier, the hyperparameters tuned are n_estimators and max_depth.
elif current_model_name == 'gradient_boosting_classifier':
parameters = {'max_depth': [2, 3, 4, 5, 6, 7], 'n_estimators': [i * 10 for i in range(2, 11, 2)]}
# For gaussian_naive_bayes, none of the hyperparameters are tuned.
else:
parameters = {'None': ['None']}
return parameters
def split_data_input_target(skeleton_data: pd.DataFrame):
"""Splits skeleton_data into input and target datasets by filtering / selecting certain columns.
Args:
skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.
Returns:
A tuple containing 2 numpy ndarrays for the input and target datasets.
"""
skeleton_data_input = skeleton_data.drop(columns=['data_name', 'action'])
skeleton_data_target = skeleton_data['action']
return np.array(skeleton_data_input), np.array(skeleton_data_target)
def video_based_model_testing(test_skeleton_information: pd.DataFrame,
current_model: sklearn):
"""Tests performance of the currently trained model on the validation or testing sets, where the performance is
evaluated per video / file, instead of evaluating per frame.
Args:
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the validation or testing sets.
current_model: Scikit-learn model that is currently being trained and tested.
Returns:
A tuple containing the target and predicted actions for each video in the validation / testing set.
"""
# Identifies unique data_names in the validation / testing set.
test_data_names = np.unique(test_skeleton_information['data_name'])
test_target_data = []
test_predicted_data = []
# Iterates across the identified unique data names
for i in range(len(test_data_names)):
# Filters skeleton point information for the current data name.
current_data_name_skeleton_information = test_skeleton_information[test_skeleton_information['data_name'] ==
test_data_names[i]]
# Splits filtered skeleton point information into input and target data.
test_skeleton_input_data, test_skeleton_target_data = split_data_input_target(
current_data_name_skeleton_information)
# Predicts labels for each frame in the filtered skeleton point information.
test_skeleton_predicted_data = list(current_model.predict(test_skeleton_input_data))
# Identifies which predicted label has highest count and appends it to the final predicted data. Also, appends
# target label to the target data.
test_target_data.append(max(current_data_name_skeleton_information['action']))
test_predicted_data.append(max(test_skeleton_predicted_data, key=test_skeleton_predicted_data.count))
return np.array(test_target_data), np.array(test_predicted_data)
def model_training_testing(train_skeleton_information: pd.DataFrame,
validation_skeleton_information: pd.DataFrame,
test_skeleton_information: pd.DataFrame,
current_model_name: str,
parameters: dict):
"""Trains and validates model for the current model name and hyperparameters on the train_skeleton_informaiton and
validation_skeleton_information.
Args:
train_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Training set.
validation_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Validation set.
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Test set.
current_model_name: Name of the model currently expected to be trained.
parameters: Current parameter values used for training and validating the model.
Returns:
A tuple which contains the training metrics, validation metrics, & test metrics.
"""
# Based on the current_model_name, the scikit-learn object is initialized using the hyperparameter (if necessary)
if current_model_name == 'support_vector_classifier':
model = SVC(kernel=parameters['kernel'])
elif current_model_name == 'decision_tree_classifier':
model = DecisionTreeClassifier(criterion=parameters['criterion'], splitter=parameters['splitter'],
max_depth=parameters['max_depth'])
elif current_model_name == 'random_forest_classifier':
model = RandomForestClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'extra_trees_classifier':
model = ExtraTreesClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'gradient_boosting_classifier':
model = GradientBoostingClassifier(n_estimators=parameters['n_estimators'], max_depth=parameters['max_depth'])
else:
model = GaussianNB()
# Splits Training skeleton information into input and target data.
train_skeleton_input_data, train_skeleton_target_data = split_data_input_target(train_skeleton_information)
# Trains the object created for the model using the training input and target.
model.fit(train_skeleton_input_data, train_skeleton_target_data)
# Predicts video-based action labels for the training, validation, and testing skeleton information data.
train_skeleton_target_data, train_skeleton_predicted_data = video_based_model_testing(train_skeleton_information,
model)
validation_skeleton_target_data, validation_skeleton_predicted_data = video_based_model_testing(
validation_skeleton_information, model)
test_skeleton_target_data, test_skeleton_predicted_data = video_based_model_testing(test_skeleton_information, model)
# Calculates metrics for the predicted action labels for the training, validation, and testing sets.
train_metrics = calculate_metrics(train_skeleton_target_data, train_skeleton_predicted_data)
validation_metrics = calculate_metrics(validation_skeleton_target_data, validation_skeleton_predicted_data)
test_metrics = calculate_metrics(test_skeleton_target_data, test_skeleton_predicted_data)
return train_metrics, validation_metrics, test_metrics
# MASKED: per_combination_results_export function (lines 293-311)
def appends_parameter_metrics_combination(current_model_name: str,
current_combination_name: str,
current_split_metrics: dict,
split_metrics_dataframe: pd.DataFrame):
"""Appends the metrics for the current model and current parameter combination to the main dataframe.
Args:
current_model_name: Name of the model currently being trained.
current_combination_name: Current combination of parameters used for training the model.
current_split_metrics: Metrics for the current parameter combination for the model.
split_metrics_dataframe: Pandas dataframe which contains metrics for the current combination of modalities.
Returns:
Updated version of the pandas dataframe which contains metrics for the current combination of modalities.
"""
current_split_metrics['model_names'] = current_model_name
current_split_metrics['parameters'] = current_combination_name
split_metrics_dataframe = split_metrics_dataframe.append(current_split_metrics, ignore_index=True)
return split_metrics_dataframe
def per_combination_model_training_testing(train_subject_ids: list,
validation_subject_ids: list,
test_subject_ids: list,
n_actions: int,
n_takes: int,
current_combination_modalities: list,
skeleton_pose_model: str,
model_names: list):
"""Combines skeleton point information based on modality combination, and subject id group. Trains, validates, and
tests the list of classifier models. Calculates metrics for each data split, model and parameter combination.
Args:
train_subject_ids: List of subject ids in the training set.
validation_subject_ids: List of subject ids in the validation set.
test_subject_ids: List of subject ids in the testing set.
n_actions: Total number of actions in the original dataset.
n_takes: Total number of takes in the original dataset.
current_combination_modalities: Current combination of modalities which will be used to import and combine
the dataset.
skeleton_pose_model: Name of the pose model currently used for extracting skeleton points.
model_names: List of ML classifier model names which will be used for creating the model objects.
Returns:
None.
"""
# Combines skeleton point information based on modality combination, and subject id group.
train_skeleton_information = data_combiner(n_actions, train_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
validation_skeleton_information = data_combiner(n_actions, validation_subject_ids, n_takes,
current_combination_modalities, skeleton_pose_model)
test_skeleton_information = data_combiner(n_actions, test_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
# Creating empty dataframes for the metrics for current modality combination's training, validation, and testing
# datasets.
metrics_features = ['accuracy_score', 'balanced_accuracy_score', 'precision_score', 'recall_score', 'f1_score']
train_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
validation_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
test_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
combination_name = '_'.join(current_combination_modalities + [skeleton_pose_model])
# Iterates across model names and parameter grid for training and testing the classification models.
for i in range(len(model_names)):
# Retrieves parameters and generates parameter combinations.
parameters = retrieve_hyperparameters(model_names[i])
parameters_grid = ParameterGrid(parameters)
for j in range(len(parameters_grid)):
current_parameters_grid_name = ', '.join(['{}={}'.format(k, parameters_grid[j][k]) for k in
parameters_grid[j].keys()])
# Performs model training and testing. Also, generates metrics for the data splits.
training_metrics, validation_metrics, test_metrics = model_training_testing(
train_skeleton_information, validation_skeleton_information, test_skeleton_information, model_names[i],
parameters_grid[j])
# Appends current modality's train, validation, and test metrics to the main dataframes.
train_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, training_metrics, train_models_parameters_metrics)
validation_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, validation_metrics, validation_models_parameters_metrics)
test_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, test_metrics, test_models_parameters_metrics)
if model_names[i] != 'gaussian_naive_bayes':
print('modality_combination={}, model={}, {} completed successfully.'.format(
combination_name, model_names[i], current_parameters_grid_name))
else:
print('modality_combination={}, model={} completed successfully.'.format(combination_name,
model_names[i]))
# Exports main training, validation and testing metrics into CSV files.
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'train_metrics',
train_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]),
'validation_metrics', validation_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'test_metrics',
test_models_parameters_metrics)
def main():
print()
n_actions = 27
n_subjects = 8
n_takes = 4
skeleton_pose_models = ['coco', 'mpi']
modalities = ['rgb', 'depth', 'inertial']
model_names = ['gaussian_naive_bayes', 'support_vector_classifier', 'decision_tree_classifier',
'random_forest_classifier', 'extra_trees_classifier', 'gradient_boosting_classifier']
modality_combinations = list_combinations_generator(modalities)
train_subject_ids = [i for i in range(1, n_subjects - 1)]
validation_subject_ids = [n_subjects - 1]
test_subject_ids = [n_subjects]
for i in range(len(modality_combinations)):
for j in range(len(skeleton_pose_models)):
per_combination_model_training_testing(train_subject_ids, validation_subject_ids, test_subject_ids,
n_actions, n_takes, modality_combinations[i],
skeleton_pose_models[j], model_names)
print()
if __name__ == '__main__':
main()
|
def per_combination_results_export(combination_name: str,
data_split: str,
metrics_dataframe: pd.DataFrame):
"""Exports the metrics_dataframe into a CSV format to the mentioned data_split folder. If the folder does not exist,
then the folder is created.
Args:
combination_name: Name of the current combination of modalities and skeleton pose model.
data_split: Name of the split the subset of the dataset belongs to.
metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.
Returns:
None.
"""
directory_path = '{}/{}'.format('../results/combination_results', combination_name)
if not os.path.isdir(directory_path):
os.mkdir(directory_path)
file_path = '{}/{}.csv'.format(directory_path, data_split)
metrics_dataframe.to_csv(file_path, index=False)
| 293 | 311 |
# authors_name = 'Preetham Ganesh'
# project_title = 'Multi Sensor-based Human Activity Recognition using OpenCV and Sensor Fusion'
# email = '[email protected]'
import numpy as np
import os
import pandas as pd
import itertools
import logging
import sklearn.pipeline
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import ParameterGrid
logging.getLogger('sklearn').setLevel(logging.FATAL)
def list_combinations_generator(modalities: list):
"""Generates combinations for items in the given list.
Args:
modalities: List of modalities available in the dataset.
Returns:
Combinations of items in the given list.
"""
modality_combinations = list()
# Iterates across modalities to generate combinations based on length.
for length in range(1, len(modalities) + 1):
# Generate combinations for the current length.
current_length_combinations = itertools.combinations(modalities, length)
# Iterates across the generated combinations to convert it into a list.
for combination in current_length_combinations:
current_combination_list = list()
for k in combination:
current_combination_list.append(k)
modality_combinations.append(current_combination_list)
return modality_combinations
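# Illustrative example (not executed anywhere in this script): for the three modalities used in main() below,
# list_combinations_generator is expected to return the seven non-empty combinations in itertools.combinations order:
# list_combinations_generator(['rgb', 'depth', 'inertial'])
# -> [['rgb'], ['depth'], ['inertial'], ['rgb', 'depth'], ['rgb', 'inertial'],
#     ['depth', 'inertial'], ['rgb', 'depth', 'inertial']]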
def data_combiner(n_actions: int,
subject_ids: list,
n_takes: int,
modalities: list,
skeleton_pose_model: str):
"""Combines skeleton point information for all actions, all takes, given list of subject ids and given list of
modalities.
Args:
n_actions: Total number of actions in the original dataset.
subject_ids: List of subjects in the current set.
n_takes: Total number of takes in the original dataset.
modalities: Current combination of modalities.
skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point
information.
Returns:
A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list
of subject ids and given list of modalities.
"""
combined_modality_skeleton_information = pd.DataFrame()
# Iterates across actions, subject_ids, takes, and modalities to combine skeleton point information.
for i in range(1, n_actions + 1):
for j in range(len(subject_ids)):
for k in range(1, n_takes + 1):
data_name = 'a{}_s{}_t{}'.format(i, subject_ids[j], k)
# Iterates across modalities to import skeleton point information file and adds it to
# combined_modality_skeleton_information. If file not found, it moves on to the next combination.
try:
# Imports 1st modality's skeleton point information for current data_name and skeleton_pose_model.
current_data_name_modality_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[0], data_name, skeleton_pose_model))
except FileNotFoundError:
continue
                # Since the number of modalities differs between combinations, if the current combination contains
                # more than one modality, the skeleton point information imported for the remaining modalities is
                # merged into the skeleton point information of the first modality.
if len(modalities) != 1:
for m in range(1, len(modalities)):
current_skeleton_point_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[m], data_name, skeleton_pose_model))
current_data_name_modality_information = pd.merge(current_data_name_modality_information,
current_skeleton_point_information,
on='frame', how='outer')
# Adds data_name to the imported skeleton point information.
current_data_name_modality_information['data_name'] = [data_name for _ in range(len(
current_data_name_modality_information))]
# Removes frame column from the imported skeleton point information.
current_data_name_modality_information = current_data_name_modality_information.drop(columns=['frame'])
# Adds action column to the imported skeleton point information.
current_data_name_modality_information['action'] = [i for _ in range(len(
current_data_name_modality_information))]
# Appends currently imported & modified skeleton point information to the combined modality skeleton
# point information
combined_modality_skeleton_information = combined_modality_skeleton_information.append(
current_data_name_modality_information)
return combined_modality_skeleton_information
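# Note on the combined dataframe built above: each row is one frame from a file named 'a{action}_s{subject}_t{take}'
# (e.g. 'a5_s2_t3'); skeleton point columns from every modality in the combination are merged on 'frame' before that
# column is dropped, and 'data_name' / 'action' columns are appended so frames can later be regrouped per video.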
def calculate_metrics(actual_values: np.ndarray,
predicted_values: np.ndarray):
"""Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,
and f1 scores.
Args:
actual_values: Actual action labels in the dataset
predicted_values: Action labels predicted by the currently trained model
Returns:
Dictionary contains keys as score names and values as scores which are floating point values.
"""
return {'accuracy_score': round(accuracy_score(actual_values, predicted_values) * 100, 3),
'balanced_accuracy_score': round(balanced_accuracy_score(actual_values, predicted_values) * 100, 3),
'precision_score': round(precision_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'recall_score': round(recall_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'f1_score': round(f1_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3)}
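# Usage sketch (hypothetical values, not part of the pipeline): with actual = np.array([1, 2, 2, 3]) and
# predicted = np.array([1, 2, 3, 3]), calculate_metrics(actual, predicted) returns a dictionary of percentages rounded
# to 3 decimal places, e.g. {'accuracy_score': 75.0, ...}; the weighted precision/recall/f1 scores are restricted to
# the labels present in the predictions via labels=np.unique(predicted_values).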
def retrieve_hyperparameters(current_model_name: str):
"""Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).
Args:
current_model_name: Name of the model currently expected to be trained
Returns:
        A dictionary mapping each hyperparameter name to the list of values that will be used to optimize the model.
"""
# For support_vector_classifier, the hyperparameter tuned is kernel.
if current_model_name == 'support_vector_classifier':
parameters = {'kernel': ['linear', 'poly', 'rbf']}
# For decision_tree_classifier, the hyperparameters tuned are criterion, splitter, and max_depth.
elif current_model_name == 'decision_tree_classifier':
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 3, 4, 5, 6, 7]}
# For random_forest_classifier or extra_trees_classifier, the hyperparameters tuned are n_estimators, criterion, and
# max_depth
elif current_model_name == 'random_forest_classifier' or current_model_name == 'extra_trees_classifier':
parameters = {'n_estimators': [i * 10 for i in range(2, 11, 2)], 'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 6, 7]}
    # For gradient_boosting_classifier, the hyperparameters tuned are n_estimators and max_depth.
elif current_model_name == 'gradient_boosting_classifier':
parameters = {'max_depth': [2, 3, 4, 5, 6, 7], 'n_estimators': [i * 10 for i in range(2, 11, 2)]}
# For gaussian_naive_bayes, none of the hyperparameters are tuned.
else:
parameters = {'None': ['None']}
return parameters
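# Example of how these dictionaries are consumed (see the ParameterGrid usage further below): the
# decision_tree_classifier grid above expands into 2 (criterion) * 2 (splitter) * 6 (max_depth) = 24 parameter
# combinations, each trained and evaluated separately, while gaussian_naive_bayes expands into a single placeholder
# combination {'None': 'None'}.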
def split_data_input_target(skeleton_data: pd.DataFrame):
"""Splits skeleton_data into input and target datasets by filtering / selecting certain columns.
Args:
skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.
Returns:
A tuple containing 2 numpy ndarrays for the input and target datasets.
"""
skeleton_data_input = skeleton_data.drop(columns=['data_name', 'action'])
skeleton_data_target = skeleton_data['action']
return np.array(skeleton_data_input), np.array(skeleton_data_target)
def video_based_model_testing(test_skeleton_information: pd.DataFrame,
current_model: sklearn):
"""Tests performance of the currently trained model on the validation or testing sets, where the performance is
evaluated per video / file, instead of evaluating per frame.
Args:
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the validation or testing sets.
current_model: Scikit-learn model that is currently being trained and tested.
Returns:
A tuple contains the target and predicted action for each video in the validation / testing set.
"""
# Identifies unique data_names in the validation / testing set.
test_data_names = np.unique(test_skeleton_information['data_name'])
test_target_data = []
test_predicted_data = []
# Iterates across the identified unique data names
for i in range(len(test_data_names)):
# Filters skeleton point information for the current data name.
current_data_name_skeleton_information = test_skeleton_information[test_skeleton_information['data_name'] ==
test_data_names[i]]
# Splits filtered skeleton point information into input and target data.
test_skeleton_input_data, test_skeleton_target_data = split_data_input_target(
current_data_name_skeleton_information)
# Predicts labels for each frame in the filtered skeleton point information.
test_skeleton_predicted_data = list(current_model.predict(test_skeleton_input_data))
# Identifies which predicted label has highest count and appends it to the final predicted data. Also, appends
# target label to the target data.
test_target_data.append(max(current_data_name_skeleton_information['action']))
test_predicted_data.append(max(test_skeleton_predicted_data, key=test_skeleton_predicted_data.count))
return np.array(test_target_data), np.array(test_predicted_data)
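# Example of the per-video vote (hypothetical frame predictions): if the frames of one video are predicted as
# [4, 4, 7, 4], max(predictions, key=predictions.count) picks 4 as the video-level prediction, while the video-level
# target is the action label shared by all of its frames.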
def model_training_testing(train_skeleton_information: pd.DataFrame,
validation_skeleton_information: pd.DataFrame,
test_skeleton_information: pd.DataFrame,
current_model_name: str,
parameters: dict):
"""Trains and validates model for the current model name and hyperparameters on the train_skeleton_informaiton and
validation_skeleton_information.
Args:
train_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Training set.
validation_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Validation set.
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Test set.
current_model_name: Name of the model currently expected to be trained.
parameters: Current parameter values used for training and validating the model.
Returns:
A tuple which contains the training metrics, validation metrics, & test metrics.
"""
# Based on the current_model_name, the scikit-learn object is initialized using the hyperparameter (if necessary)
if current_model_name == 'support_vector_classifier':
model = SVC(kernel=parameters['kernel'])
elif current_model_name == 'decision_tree_classifier':
model = DecisionTreeClassifier(criterion=parameters['criterion'], splitter=parameters['splitter'],
max_depth=parameters['max_depth'])
elif current_model_name == 'random_forest_classifier':
model = RandomForestClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'extra_trees_classifier':
model = ExtraTreesClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'gradient_boosting_classifier':
model = GradientBoostingClassifier(n_estimators=parameters['n_estimators'], max_depth=parameters['max_depth'])
else:
model = GaussianNB()
# Splits Training skeleton information into input and target data.
train_skeleton_input_data, train_skeleton_target_data = split_data_input_target(train_skeleton_information)
# Trains the object created for the model using the training input and target.
model.fit(train_skeleton_input_data, train_skeleton_target_data)
# Predict video based action labels for training and validation skeleton information data.
train_skeleton_target_data, train_skeleton_predicted_data = video_based_model_testing(train_skeleton_information,
model)
validation_skeleton_target_data, validation_skeleton_predicted_data = video_based_model_testing(
validation_skeleton_information, model)
test_skeleton_target_data, test_skeleton_predicted_data = video_based_model_testing(test_skeleton_information, model)
    # Calculates metrics for the predicted action labels for the training, validation, and testing sets.
train_metrics = calculate_metrics(train_skeleton_target_data, train_skeleton_predicted_data)
validation_metrics = calculate_metrics(validation_skeleton_target_data, validation_skeleton_predicted_data)
test_metrics = calculate_metrics(test_skeleton_target_data, test_skeleton_predicted_data)
return train_metrics, validation_metrics, test_metrics
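# Usage sketch (train_df / validation_df / test_df are placeholder dataframes, not variables defined in this script):
# train_metrics, validation_metrics, test_metrics = model_training_testing(
#     train_df, validation_df, test_df, 'decision_tree_classifier',
#     {'criterion': 'gini', 'splitter': 'best', 'max_depth': 4})
# Each returned value is the metrics dictionary produced by calculate_metrics for the corresponding split; for
# 'gaussian_naive_bayes' the parameters argument is effectively ignored.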
def per_combination_results_export(combination_name: str,
data_split: str,
metrics_dataframe: pd.DataFrame):
"""Exports the metrics_dataframe into a CSV format to the mentioned data_split folder. If the folder does not exist,
then the folder is created.
Args:
combination_name: Name of the current combination of modalities and skeleton pose model.
data_split: Name of the split the subset of the dataset belongs to.
metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.
Returns:
None.
"""
directory_path = '{}/{}'.format('../results/combination_results', combination_name)
if not os.path.isdir(directory_path):
os.mkdir(directory_path)
file_path = '{}/{}.csv'.format(directory_path, data_split)
metrics_dataframe.to_csv(file_path, index=False)
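# Note: os.mkdir only creates the leaf folder, so the call above assumes '../results/combination_results' already
# exists; a more defensive variant (a sketch, not used here) would be os.makedirs(directory_path, exist_ok=True).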
def appends_parameter_metrics_combination(current_model_name: str,
current_combination_name: str,
current_split_metrics: dict,
split_metrics_dataframe: pd.DataFrame):
"""Appends the metrics for the current model and current parameter combination to the main dataframe.
Args:
current_model_name: Name of the model currently being trained.
current_combination_name: Current combination of parameters used for training the model.
current_split_metrics: Metrics for the current parameter combination for the model.
split_metrics_dataframe: Pandas dataframe which contains metrics for the current combination of modalities.
Returns:
Updated version of the pandas dataframe which contains metrics for the current combination of modalities.
"""
current_split_metrics['model_names'] = current_model_name
current_split_metrics['parameters'] = current_combination_name
split_metrics_dataframe = split_metrics_dataframe.append(current_split_metrics, ignore_index=True)
return split_metrics_dataframe
def per_combination_model_training_testing(train_subject_ids: list,
validation_subject_ids: list,
test_subject_ids: list,
n_actions: int,
n_takes: int,
current_combination_modalities: list,
skeleton_pose_model: str,
model_names: list):
"""Combines skeleton point information based on modality combination, and subject id group. Trains, validates, and
tests the list of classifier models. Calculates metrics for each data split, model and parameter combination.
Args:
train_subject_ids: List of subject ids in the training set.
validation_subject_ids: List of subject ids in the validation set.
test_subject_ids: List of subject ids in the testing set.
n_actions: Total number of actions in the original dataset.
n_takes: Total number of takes in the original dataset.
current_combination_modalities: Current combination of modalities which will be used to import and combine
the dataset.
        skeleton_pose_model: Name of the skeleton pose model currently used for extracting skeleton point information.
        model_names: List of ML classifier model names which will be used for creating the classifier objects.
Returns:
None.
"""
# Combines skeleton point information based on modality combination, and subject id group.
train_skeleton_information = data_combiner(n_actions, train_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
validation_skeleton_information = data_combiner(n_actions, validation_subject_ids, n_takes,
current_combination_modalities, skeleton_pose_model)
test_skeleton_information = data_combiner(n_actions, test_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
# Creating empty dataframes for the metrics for current modality combination's training, validation, and testing
# datasets.
metrics_features = ['accuracy_score', 'balanced_accuracy_score', 'precision_score', 'recall_score', 'f1_score']
train_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
validation_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
test_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
combination_name = '_'.join(current_combination_modalities + [skeleton_pose_model])
# Iterates across model names and parameter grid for training and testing the classification models.
for i in range(len(model_names)):
# Retrieves parameters and generates parameter combinations.
parameters = retrieve_hyperparameters(model_names[i])
parameters_grid = ParameterGrid(parameters)
for j in range(len(parameters_grid)):
current_parameters_grid_name = ', '.join(['{}={}'.format(k, parameters_grid[j][k]) for k in
parameters_grid[j].keys()])
# Performs model training and testing. Also, generates metrics for the data splits.
training_metrics, validation_metrics, test_metrics = model_training_testing(
train_skeleton_information, validation_skeleton_information, test_skeleton_information, model_names[i],
parameters_grid[j])
# Appends current modality's train, validation, and test metrics to the main dataframes.
train_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, training_metrics, train_models_parameters_metrics)
validation_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, validation_metrics, validation_models_parameters_metrics)
test_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, test_metrics, test_models_parameters_metrics)
if model_names[i] != 'gaussian_naive_bayes':
print('modality_combination={}, model={}, {} completed successfully.'.format(
combination_name, model_names[i], current_parameters_grid_name))
else:
print('modality_combination={}, model={} completed successfully.'.format(combination_name,
model_names[i]))
# Exports main training, validation and testing metrics into CSV files.
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'train_metrics',
train_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]),
'validation_metrics', validation_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'test_metrics',
test_models_parameters_metrics)
def main():
print()
n_actions = 27
n_subjects = 8
n_takes = 4
skeleton_pose_models = ['coco', 'mpi']
modalities = ['rgb', 'depth', 'inertial']
model_names = ['gaussian_naive_bayes', 'support_vector_classifier', 'decision_tree_classifier',
'random_forest_classifier', 'extra_trees_classifier', 'gradient_boosting_classifier']
modality_combinations = list_combinations_generator(modalities)
train_subject_ids = [i for i in range(1, n_subjects - 1)]
validation_subject_ids = [n_subjects - 1]
test_subject_ids = [n_subjects]
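    # With n_subjects = 8 this is a subject-wise split: training uses subjects 1-6, validation uses subject 7, and
    # testing uses subject 8.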
for i in range(len(modality_combinations)):
for j in range(len(skeleton_pose_models)):
per_combination_model_training_testing(train_subject_ids, validation_subject_ids, test_subject_ids,
n_actions, n_takes, modality_combinations[i],
skeleton_pose_models[j], model_names)
print()
if __name__ == '__main__':
main()
|
__init__
|
Initialize Generator object.
Args:
batch_size: The size of the batches to generate.
group_method: Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).
shuffle_groups: If True, shuffles the groups each epoch.
input_size: Side length (in pixels) of the square network input; the output feature map is input_size // 4.
max_objects: Maximum number of ground-truth boxes per image used when building the training targets.
|
import cv2
import keras
import math
import matplotlib.pyplot as plt
import numpy as np
import random
import warnings
from generators.utils import get_affine_transform, affine_transform
from generators.utils import gaussian_radius, draw_gaussian, gaussian_radius_2, draw_gaussian_2
class Generator(keras.utils.Sequence):
"""
Abstract generator class.
"""
# MASKED: __init__ function (lines 18-58)
def on_epoch_end(self):
if self.shuffle_groups:
random.shuffle(self.groups)
self.current_index = 0
def size(self):
"""
Size of the dataset.
"""
raise NotImplementedError('size method not implemented')
def num_classes(self):
"""
Number of classes in the dataset.
"""
raise NotImplementedError('num_classes method not implemented')
def has_label(self, label):
"""
Returns True if label is a known label.
"""
raise NotImplementedError('has_label method not implemented')
def has_name(self, name):
"""
Returns True if name is a known class.
"""
raise NotImplementedError('has_name method not implemented')
def name_to_label(self, name):
"""
Map name to label.
"""
raise NotImplementedError('name_to_label method not implemented')
def label_to_name(self, label):
"""
Map label to name.
"""
raise NotImplementedError('label_to_name method not implemented')
def image_aspect_ratio(self, image_index):
"""
Compute the aspect ratio for an image with image_index.
"""
raise NotImplementedError('image_aspect_ratio method not implemented')
def load_image(self, image_index):
"""
Load an image at the image_index.
"""
raise NotImplementedError('load_image method not implemented')
def load_annotations(self, image_index):
"""
Load annotations for an image_index.
"""
raise NotImplementedError('load_annotations method not implemented')
def load_annotations_group(self, group):
"""
Load annotations for all images in group.
"""
# load_annotations {'labels': np.array, 'annotations': np.array}
annotations_group = [self.load_annotations(image_index) for image_index in group]
for annotations in annotations_group:
assert (isinstance(annotations,
dict)), '\'load_annotations\' should return a list of dictionaries, received: {}'.format(
type(annotations))
assert (
'labels' in annotations), '\'load_annotations\' should return a list of dictionaries that contain \'labels\' and \'bboxes\'.'
assert (
'bboxes' in annotations), '\'load_annotations\' should return a list of dictionaries that contain \'labels\' and \'bboxes\'.'
return annotations_group
def filter_annotations(self, image_group, annotations_group, group):
"""
Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.
"""
# test all annotations
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
invalid_indices = np.where(
(annotations['bboxes'][:, 2] <= annotations['bboxes'][:, 0]) |
(annotations['bboxes'][:, 3] <= annotations['bboxes'][:, 1]) |
(annotations['bboxes'][:, 0] < 0) |
(annotations['bboxes'][:, 1] < 0) |
(annotations['bboxes'][:, 2] <= 0) |
(annotations['bboxes'][:, 3] <= 0) |
(annotations['bboxes'][:, 2] > image.shape[1]) |
(annotations['bboxes'][:, 3] > image.shape[0])
)[0]
# delete invalid indices
if len(invalid_indices):
warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(
group[index],
image.shape,
annotations['bboxes'][invalid_indices, :]
))
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], invalid_indices, axis=0)
if annotations['bboxes'].shape[0] == 0:
warnings.warn('Image with id {} (shape {}) contains no valid boxes before transform'.format(
group[index],
image.shape,
))
return image_group, annotations_group
def clip_transformed_annotations(self, image_group, annotations_group, group):
"""
        Clip boxes to the image bounds and filter out annotations whose width or height is smaller than 10 pixels
        after clipping.
"""
# test all annotations
filtered_image_group = []
filtered_annotations_group = []
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
image_height = image.shape[0]
image_width = image.shape[1]
# x1
annotations['bboxes'][:, 0] = np.clip(annotations['bboxes'][:, 0], 0, image_width - 2)
# y1
annotations['bboxes'][:, 1] = np.clip(annotations['bboxes'][:, 1], 0, image_height - 2)
# x2
annotations['bboxes'][:, 2] = np.clip(annotations['bboxes'][:, 2], 1, image_width - 1)
# y2
annotations['bboxes'][:, 3] = np.clip(annotations['bboxes'][:, 3], 1, image_height - 1)
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
small_indices = np.where(
(annotations['bboxes'][:, 2] - annotations['bboxes'][:, 0] < 10) |
(annotations['bboxes'][:, 3] - annotations['bboxes'][:, 1] < 10)
)[0]
# delete invalid indices
if len(small_indices):
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], small_indices, axis=0)
# import cv2
# for invalid_index in small_indices:
# x1, y1, x2, y2 = annotations['bboxes'][invalid_index]
# label = annotations['labels'][invalid_index]
# class_name = self.labels[label]
# print('width: {}'.format(x2 - x1))
# print('height: {}'.format(y2 - y1))
# cv2.rectangle(image, (int(round(x1)), int(round(y1))), (int(round(x2)), int(round(y2))), (0, 255, 0), 2)
# cv2.putText(image, class_name, (int(round(x1)), int(round(y1))), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 1)
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', image)
# cv2.waitKey(0)
if annotations_group[index]['bboxes'].shape[0] != 0:
filtered_image_group.append(image)
filtered_annotations_group.append(annotations_group[index])
else:
warnings.warn('Image with id {} (shape {}) contains no valid boxes after transform'.format(
group[index],
image.shape,
))
return filtered_image_group, filtered_annotations_group
def load_image_group(self, group):
"""
Load images for all images in a group.
"""
return [self.load_image(image_index) for image_index in group]
def random_visual_effect_group_entry(self, image, annotations):
"""
Randomly transforms image and annotation.
"""
# apply visual effect
image = self.visual_effect(image)
return image, annotations
def random_visual_effect_group(self, image_group, annotations_group):
"""
Randomly apply visual effect on each image.
"""
assert (len(image_group) == len(annotations_group))
if self.visual_effect is None:
# do nothing
return image_group, annotations_group
for index in range(len(image_group)):
# apply effect on a single group entry
image_group[index], annotations_group[index] = self.random_visual_effect_group_entry(
image_group[index], annotations_group[index]
)
return image_group, annotations_group
def random_transform_group_entry(self, image, annotations, transform=None):
"""
Randomly transforms image and annotation.
"""
# randomly transform both image and annotations
if transform is not None or self.transform_generator:
if transform is None:
transform = adjust_transform_for_image(next(self.transform_generator), image,
self.transform_parameters.relative_translation)
# apply transformation to image
image = apply_transform(transform, image, self.transform_parameters)
# Transform the bounding boxes in the annotations.
annotations['bboxes'] = annotations['bboxes'].copy()
for index in range(annotations['bboxes'].shape[0]):
annotations['bboxes'][index, :] = transform_aabb(transform, annotations['bboxes'][index, :])
return image, annotations
def random_transform_group(self, image_group, annotations_group):
"""
Randomly transforms each image and its annotations.
"""
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
# transform a single group entry
image_group[index], annotations_group[index] = self.random_transform_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def random_misc_group_entry(self, image, annotations):
"""
Randomly transforms image and annotation.
"""
assert annotations['bboxes'].shape[0] != 0
# randomly transform both image and annotations
image, boxes = self.misc_effect(image, annotations['bboxes'])
# Transform the bounding boxes in the annotations.
annotations['bboxes'] = boxes
return image, annotations
def random_misc_group(self, image_group, annotations_group):
"""
Randomly transforms each image and its annotations.
"""
assert (len(image_group) == len(annotations_group))
if self.misc_effect is None:
return image_group, annotations_group
for index in range(len(image_group)):
# transform a single group entry
image_group[index], annotations_group[index] = self.random_misc_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def preprocess_group_entry(self, image, annotations):
"""
Preprocess image and its annotations.
"""
# preprocess the image
image, scale, offset_h, offset_w = self.preprocess_image(image)
# apply resizing to annotations too
annotations['bboxes'] *= scale
annotations['bboxes'][:, [0, 2]] += offset_w
annotations['bboxes'][:, [1, 3]] += offset_h
# print(annotations['bboxes'][:, [2, 3]] - annotations['bboxes'][:, [0, 1]])
return image, annotations
def preprocess_group(self, image_group, annotations_group):
"""
Preprocess each image and its annotations in its group.
"""
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
# preprocess a single group entry
image_group[index], annotations_group[index] = self.preprocess_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def group_images(self):
"""
        Order the images according to self.group_method and make groups of self.batch_size.
"""
# determine the order of the images
order = list(range(self.size()))
if self.group_method == 'random':
random.shuffle(order)
elif self.group_method == 'ratio':
order.sort(key=lambda x: self.image_aspect_ratio(x))
# divide into groups, one group = one batch
self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in
range(0, len(order), self.batch_size)]
def compute_inputs(self, image_group, annotations_group):
"""
Compute inputs for the network using an image_group.
"""
# construct an image batch object
batch_images = np.zeros((len(image_group), self.input_size, self.input_size, 3), dtype=np.float32)
batch_hms = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()),
dtype=np.float32)
batch_hms_2 = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()),
dtype=np.float32)
batch_whs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)
batch_regs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)
batch_reg_masks = np.zeros((len(image_group), self.max_objects), dtype=np.float32)
batch_indices = np.zeros((len(image_group), self.max_objects), dtype=np.float32)
# copy all images to the upper left part of the image batch object
for b, (image, annotations) in enumerate(zip(image_group, annotations_group)):
c = np.array([image.shape[1] / 2., image.shape[0] / 2.], dtype=np.float32)
s = max(image.shape[0], image.shape[1]) * 1.0
trans_input = get_affine_transform(c, s, self.input_size)
# inputs
image = self.preprocess_image(image, c, s, tgt_w=self.input_size, tgt_h=self.input_size)
batch_images[b] = image
# outputs
bboxes = annotations['bboxes']
assert bboxes.shape[0] != 0
class_ids = annotations['labels']
assert class_ids.shape[0] != 0
trans_output = get_affine_transform(c, s, self.output_size)
for i in range(bboxes.shape[0]):
bbox = bboxes[i].copy()
cls_id = class_ids[i]
# (x1, y1)
bbox[:2] = affine_transform(bbox[:2], trans_output)
# (x2, y2)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.output_size - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.output_size - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h > 0 and w > 0:
radius_h, radius_w = gaussian_radius((math.ceil(h), math.ceil(w)))
radius_h = max(0, int(radius_h))
radius_w = max(0, int(radius_w))
radius = gaussian_radius_2((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
draw_gaussian(batch_hms[b, :, :, cls_id], ct_int, radius_h, radius_w)
draw_gaussian_2(batch_hms_2[b, :, :, cls_id], ct_int, radius)
batch_whs[b, i] = 1. * w, 1. * h
batch_indices[b, i] = ct_int[1] * self.output_size + ct_int[0]
batch_regs[b, i] = ct - ct_int
batch_reg_masks[b, i] = 1
# hm = batch_hms[b, :, :, cls_id]
# hm = np.round(hm * 255).astype(np.uint8)
# hm = cv2.cvtColor(hm, cv2.COLOR_GRAY2BGR)
# hm_2 = batch_hms_2[b, :, :, cls_id]
# hm_2 = np.round(hm_2 * 255).astype(np.uint8)
# hm_2 = cv2.cvtColor(hm_2, cv2.COLOR_GRAY2BGR)
# cv2.rectangle(hm, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 1)
# cv2.rectangle(hm_2, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 1)
# cv2.namedWindow('hm', cv2.WINDOW_NORMAL)
# cv2.imshow('hm', np.hstack([hm, hm_2]))
# cv2.waitKey()
# print(np.sum(batch_reg_masks[b]))
# for i in range(self.num_classes()):
# plt.subplot(4, 5, i + 1)
# hm = batch_hms[b, :, :, i]
# plt.imshow(hm, cmap='gray')
# plt.axis('off')
# plt.show()
# hm = np.sum(batch_hms[0], axis=-1)
# hm = np.round(hm * 255).astype(np.uint8)
# hm = cv2.cvtColor(hm, cv2.COLOR_GRAY2BGR)
# hm_2 = np.sum(batch_hms_2[0], axis=-1)
# hm_2 = np.round(hm_2 * 255).astype(np.uint8)
# hm_2 = cv2.cvtColor(hm_2, cv2.COLOR_GRAY2BGR)
# for i in range(bboxes.shape[0]):
# x1, y1 = np.round(affine_transform(bboxes[i, :2], trans_input)).astype(np.int32)
# x2, y2 = np.round(affine_transform(bboxes[i, 2:], trans_input)).astype(np.int32)
# x1_, y1_ = np.round(affine_transform(bboxes[i, :2], trans_output)).astype(np.int32)
# x2_, y2_ = np.round(affine_transform(bboxes[i, 2:], trans_output)).astype(np.int32)
# class_id = class_ids[i]
# cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 1)
            # cv2.putText(image, str(class_id), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 0, 0), 3)
# cv2.rectangle(hm, (x1_, y1_), (x2_, y2_), (0, 255, 0), 1)
# cv2.rectangle(hm_2, (x1_, y1_), (x2_, y2_), (0, 255, 0), 1)
# cv2.namedWindow('hm', cv2.WINDOW_NORMAL)
# cv2.imshow('hm', np.hstack([hm, hm_2]))
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', image)
# cv2.waitKey()
return [batch_images, batch_hms_2, batch_whs, batch_regs, batch_reg_masks, batch_indices]
def compute_targets(self, image_group, annotations_group):
"""
Compute target outputs for the network using images and their annotations.
"""
return np.zeros((len(image_group),))
def compute_inputs_targets(self, group):
"""
Compute inputs and target outputs for the network.
"""
# load images and annotations
# list
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly apply visual effect
image_group, annotations_group = self.random_visual_effect_group(image_group, annotations_group)
#
# # randomly transform data
# image_group, annotations_group = self.random_transform_group(image_group, annotations_group)
# randomly apply misc effect
image_group, annotations_group = self.random_misc_group(image_group, annotations_group)
#
# # perform preprocessing steps
# image_group, annotations_group = self.preprocess_group(image_group, annotations_group)
#
# # check validity of annotations
# image_group, annotations_group = self.clip_transformed_annotations(image_group, annotations_group, group)
if len(image_group) == 0:
return None, None
# compute network inputs
inputs = self.compute_inputs(image_group, annotations_group)
# compute network targets
targets = self.compute_targets(image_group, annotations_group)
return inputs, targets
def __len__(self):
"""
Number of batches for generator.
"""
return len(self.groups)
def __getitem__(self, index):
"""
Keras sequence method for generating batches.
"""
group = self.groups[self.current_index]
if self.multi_scale:
if self.current_index % 10 == 0:
random_size_index = np.random.randint(0, len(self.multi_image_sizes))
self.image_size = self.multi_image_sizes[random_size_index]
inputs, targets = self.compute_inputs_targets(group)
while inputs is None:
current_index = self.current_index + 1
if current_index >= len(self.groups):
current_index = current_index % (len(self.groups))
self.current_index = current_index
group = self.groups[self.current_index]
inputs, targets = self.compute_inputs_targets(group)
current_index = self.current_index + 1
if current_index >= len(self.groups):
current_index = current_index % (len(self.groups))
self.current_index = current_index
return inputs, targets
def preprocess_image(self, image, c, s, tgt_w, tgt_h):
trans_input = get_affine_transform(c, s, (tgt_w, tgt_h))
image = cv2.warpAffine(image, trans_input, (tgt_w, tgt_h), flags=cv2.INTER_LINEAR)
image = image.astype(np.float32)
image[..., 0] -= 103.939
image[..., 1] -= 116.779
image[..., 2] -= 123.68
return image
def get_transformed_group(self, group):
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly transform data
image_group, annotations_group = self.random_transform_group(image_group, annotations_group)
return image_group, annotations_group
def get_cropped_and_rotated_group(self, group):
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly transform data
image_group, annotations_group = self.random_crop_group(image_group, annotations_group)
image_group, annotations_group = self.random_rotate_group(image_group, annotations_group)
return image_group, annotations_group
|
def __init__(
self,
multi_scale=False,
multi_image_sizes=(320, 352, 384, 416, 448, 480, 512, 544, 576, 608),
misc_effect=None,
visual_effect=None,
batch_size=1,
group_method='ratio', # one of 'none', 'random', 'ratio'
shuffle_groups=True,
input_size=512,
max_objects=100
):
"""
Initialize Generator object.
Args:
batch_size: The size of the batches to generate.
group_method: Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).
shuffle_groups: If True, shuffles the groups each epoch.
            input_size: Side length (in pixels) of the square network input; the output feature map is input_size // 4.
            max_objects: Maximum number of ground-truth boxes per image used when building the training targets.
"""
self.misc_effect = misc_effect
self.visual_effect = visual_effect
self.batch_size = int(batch_size)
self.group_method = group_method
self.shuffle_groups = shuffle_groups
self.input_size = input_size
self.output_size = self.input_size // 4
self.max_objects = max_objects
self.groups = None
self.multi_scale = multi_scale
self.multi_image_sizes = multi_image_sizes
self.current_index = 0
# Define groups
self.group_images()
# Shuffle when initializing
if self.shuffle_groups:
random.shuffle(self.groups)
| 18 | 58 |
import cv2
import keras
import math
import matplotlib.pyplot as plt
import numpy as np
import random
import warnings
from generators.utils import get_affine_transform, affine_transform
from generators.utils import gaussian_radius, draw_gaussian, gaussian_radius_2, draw_gaussian_2
class Generator(keras.utils.Sequence):
"""
Abstract generator class.
"""
def __init__(
self,
multi_scale=False,
multi_image_sizes=(320, 352, 384, 416, 448, 480, 512, 544, 576, 608),
misc_effect=None,
visual_effect=None,
batch_size=1,
group_method='ratio', # one of 'none', 'random', 'ratio'
shuffle_groups=True,
input_size=512,
max_objects=100
):
"""
Initialize Generator object.
Args:
batch_size: The size of the batches to generate.
group_method: Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).
shuffle_groups: If True, shuffles the groups each epoch.
            input_size: Side length (in pixels) of the square network input; the output feature map is input_size // 4.
            max_objects: Maximum number of ground-truth boxes per image used when building the training targets.
"""
self.misc_effect = misc_effect
self.visual_effect = visual_effect
self.batch_size = int(batch_size)
self.group_method = group_method
self.shuffle_groups = shuffle_groups
self.input_size = input_size
self.output_size = self.input_size // 4
self.max_objects = max_objects
self.groups = None
self.multi_scale = multi_scale
self.multi_image_sizes = multi_image_sizes
self.current_index = 0
# Define groups
self.group_images()
# Shuffle when initializing
if self.shuffle_groups:
random.shuffle(self.groups)
def on_epoch_end(self):
if self.shuffle_groups:
random.shuffle(self.groups)
self.current_index = 0
def size(self):
"""
Size of the dataset.
"""
raise NotImplementedError('size method not implemented')
def num_classes(self):
"""
Number of classes in the dataset.
"""
raise NotImplementedError('num_classes method not implemented')
def has_label(self, label):
"""
Returns True if label is a known label.
"""
raise NotImplementedError('has_label method not implemented')
def has_name(self, name):
"""
Returns True if name is a known class.
"""
raise NotImplementedError('has_name method not implemented')
def name_to_label(self, name):
"""
Map name to label.
"""
raise NotImplementedError('name_to_label method not implemented')
def label_to_name(self, label):
"""
Map label to name.
"""
raise NotImplementedError('label_to_name method not implemented')
def image_aspect_ratio(self, image_index):
"""
Compute the aspect ratio for an image with image_index.
"""
raise NotImplementedError('image_aspect_ratio method not implemented')
def load_image(self, image_index):
"""
Load an image at the image_index.
"""
raise NotImplementedError('load_image method not implemented')
def load_annotations(self, image_index):
"""
Load annotations for an image_index.
"""
raise NotImplementedError('load_annotations method not implemented')
def load_annotations_group(self, group):
"""
Load annotations for all images in group.
"""
# load_annotations {'labels': np.array, 'annotations': np.array}
annotations_group = [self.load_annotations(image_index) for image_index in group]
for annotations in annotations_group:
assert (isinstance(annotations,
dict)), '\'load_annotations\' should return a list of dictionaries, received: {}'.format(
type(annotations))
assert (
'labels' in annotations), '\'load_annotations\' should return a list of dictionaries that contain \'labels\' and \'bboxes\'.'
assert (
'bboxes' in annotations), '\'load_annotations\' should return a list of dictionaries that contain \'labels\' and \'bboxes\'.'
return annotations_group
def filter_annotations(self, image_group, annotations_group, group):
"""
Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.
"""
# test all annotations
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
invalid_indices = np.where(
(annotations['bboxes'][:, 2] <= annotations['bboxes'][:, 0]) |
(annotations['bboxes'][:, 3] <= annotations['bboxes'][:, 1]) |
(annotations['bboxes'][:, 0] < 0) |
(annotations['bboxes'][:, 1] < 0) |
(annotations['bboxes'][:, 2] <= 0) |
(annotations['bboxes'][:, 3] <= 0) |
(annotations['bboxes'][:, 2] > image.shape[1]) |
(annotations['bboxes'][:, 3] > image.shape[0])
)[0]
# delete invalid indices
if len(invalid_indices):
warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(
group[index],
image.shape,
annotations['bboxes'][invalid_indices, :]
))
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], invalid_indices, axis=0)
if annotations['bboxes'].shape[0] == 0:
warnings.warn('Image with id {} (shape {}) contains no valid boxes before transform'.format(
group[index],
image.shape,
))
return image_group, annotations_group
def clip_transformed_annotations(self, image_group, annotations_group, group):
"""
        Clip boxes to the image bounds and filter out annotations whose width or height is smaller than 10 pixels
        after clipping.
"""
# test all annotations
filtered_image_group = []
filtered_annotations_group = []
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
image_height = image.shape[0]
image_width = image.shape[1]
# x1
annotations['bboxes'][:, 0] = np.clip(annotations['bboxes'][:, 0], 0, image_width - 2)
# y1
annotations['bboxes'][:, 1] = np.clip(annotations['bboxes'][:, 1], 0, image_height - 2)
# x2
annotations['bboxes'][:, 2] = np.clip(annotations['bboxes'][:, 2], 1, image_width - 1)
# y2
annotations['bboxes'][:, 3] = np.clip(annotations['bboxes'][:, 3], 1, image_height - 1)
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
small_indices = np.where(
(annotations['bboxes'][:, 2] - annotations['bboxes'][:, 0] < 10) |
(annotations['bboxes'][:, 3] - annotations['bboxes'][:, 1] < 10)
)[0]
# delete invalid indices
if len(small_indices):
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], small_indices, axis=0)
# import cv2
# for invalid_index in small_indices:
# x1, y1, x2, y2 = annotations['bboxes'][invalid_index]
# label = annotations['labels'][invalid_index]
# class_name = self.labels[label]
# print('width: {}'.format(x2 - x1))
# print('height: {}'.format(y2 - y1))
# cv2.rectangle(image, (int(round(x1)), int(round(y1))), (int(round(x2)), int(round(y2))), (0, 255, 0), 2)
# cv2.putText(image, class_name, (int(round(x1)), int(round(y1))), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 1)
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', image)
# cv2.waitKey(0)
if annotations_group[index]['bboxes'].shape[0] != 0:
filtered_image_group.append(image)
filtered_annotations_group.append(annotations_group[index])
else:
warnings.warn('Image with id {} (shape {}) contains no valid boxes after transform'.format(
group[index],
image.shape,
))
return filtered_image_group, filtered_annotations_group
def load_image_group(self, group):
"""
Load images for all images in a group.
"""
return [self.load_image(image_index) for image_index in group]
def random_visual_effect_group_entry(self, image, annotations):
"""
Randomly transforms image and annotation.
"""
# apply visual effect
image = self.visual_effect(image)
return image, annotations
def random_visual_effect_group(self, image_group, annotations_group):
"""
Randomly apply visual effect on each image.
"""
assert (len(image_group) == len(annotations_group))
if self.visual_effect is None:
# do nothing
return image_group, annotations_group
for index in range(len(image_group)):
# apply effect on a single group entry
image_group[index], annotations_group[index] = self.random_visual_effect_group_entry(
image_group[index], annotations_group[index]
)
return image_group, annotations_group
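    # Note: the transform path below relies on helpers (adjust_transform_for_image, apply_transform, transform_aabb)
    # and on self.transform_generator / self.transform_parameters that are neither imported nor defined in this file;
    # random_transform_group is only reachable through get_transformed_group, which this generator never calls, so
    # those dependencies would have to be supplied by a caller or subclass.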
def random_transform_group_entry(self, image, annotations, transform=None):
"""
Randomly transforms image and annotation.
"""
# randomly transform both image and annotations
if transform is not None or self.transform_generator:
if transform is None:
transform = adjust_transform_for_image(next(self.transform_generator), image,
self.transform_parameters.relative_translation)
# apply transformation to image
image = apply_transform(transform, image, self.transform_parameters)
# Transform the bounding boxes in the annotations.
annotations['bboxes'] = annotations['bboxes'].copy()
for index in range(annotations['bboxes'].shape[0]):
annotations['bboxes'][index, :] = transform_aabb(transform, annotations['bboxes'][index, :])
return image, annotations
def random_transform_group(self, image_group, annotations_group):
"""
Randomly transforms each image and its annotations.
"""
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
# transform a single group entry
image_group[index], annotations_group[index] = self.random_transform_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def random_misc_group_entry(self, image, annotations):
"""
Randomly transforms image and annotation.
"""
assert annotations['bboxes'].shape[0] != 0
# randomly transform both image and annotations
image, boxes = self.misc_effect(image, annotations['bboxes'])
# Transform the bounding boxes in the annotations.
annotations['bboxes'] = boxes
return image, annotations
def random_misc_group(self, image_group, annotations_group):
"""
Randomly transforms each image and its annotations.
"""
assert (len(image_group) == len(annotations_group))
if self.misc_effect is None:
return image_group, annotations_group
for index in range(len(image_group)):
# transform a single group entry
image_group[index], annotations_group[index] = self.random_misc_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def preprocess_group_entry(self, image, annotations):
"""
Preprocess image and its annotations.
"""
# preprocess the image
image, scale, offset_h, offset_w = self.preprocess_image(image)
# apply resizing to annotations too
annotations['bboxes'] *= scale
annotations['bboxes'][:, [0, 2]] += offset_w
annotations['bboxes'][:, [1, 3]] += offset_h
# print(annotations['bboxes'][:, [2, 3]] - annotations['bboxes'][:, [0, 1]])
return image, annotations
def preprocess_group(self, image_group, annotations_group):
"""
Preprocess each image and its annotations in its group.
"""
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
# preprocess a single group entry
image_group[index], annotations_group[index] = self.preprocess_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def group_images(self):
"""
        Order the images according to self.group_method and make groups of self.batch_size.
"""
# determine the order of the images
order = list(range(self.size()))
if self.group_method == 'random':
random.shuffle(order)
elif self.group_method == 'ratio':
order.sort(key=lambda x: self.image_aspect_ratio(x))
# divide into groups, one group = one batch
self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in
range(0, len(order), self.batch_size)]
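    # Example (assuming 10 images and batch_size = 4): the modulo indexing wraps the final, partial batch back to the
    # start of the ordering, producing groups such as [[o0, o1, o2, o3], [o4, o5, o6, o7], [o8, o9, o0, o1]], where
    # o_i is the i-th index in `order`.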
def compute_inputs(self, image_group, annotations_group):
"""
Compute inputs for the network using an image_group.
"""
# construct an image batch object
batch_images = np.zeros((len(image_group), self.input_size, self.input_size, 3), dtype=np.float32)
batch_hms = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()),
dtype=np.float32)
batch_hms_2 = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()),
dtype=np.float32)
batch_whs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)
batch_regs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)
batch_reg_masks = np.zeros((len(image_group), self.max_objects), dtype=np.float32)
batch_indices = np.zeros((len(image_group), self.max_objects), dtype=np.float32)
# copy all images to the upper left part of the image batch object
for b, (image, annotations) in enumerate(zip(image_group, annotations_group)):
c = np.array([image.shape[1] / 2., image.shape[0] / 2.], dtype=np.float32)
s = max(image.shape[0], image.shape[1]) * 1.0
trans_input = get_affine_transform(c, s, self.input_size)
# inputs
image = self.preprocess_image(image, c, s, tgt_w=self.input_size, tgt_h=self.input_size)
batch_images[b] = image
# outputs
bboxes = annotations['bboxes']
assert bboxes.shape[0] != 0
class_ids = annotations['labels']
assert class_ids.shape[0] != 0
trans_output = get_affine_transform(c, s, self.output_size)
for i in range(bboxes.shape[0]):
bbox = bboxes[i].copy()
cls_id = class_ids[i]
# (x1, y1)
bbox[:2] = affine_transform(bbox[:2], trans_output)
# (x2, y2)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.output_size - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.output_size - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h > 0 and w > 0:
radius_h, radius_w = gaussian_radius((math.ceil(h), math.ceil(w)))
radius_h = max(0, int(radius_h))
radius_w = max(0, int(radius_w))
radius = gaussian_radius_2((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
draw_gaussian(batch_hms[b, :, :, cls_id], ct_int, radius_h, radius_w)
draw_gaussian_2(batch_hms_2[b, :, :, cls_id], ct_int, radius)
batch_whs[b, i] = 1. * w, 1. * h
batch_indices[b, i] = ct_int[1] * self.output_size + ct_int[0]
batch_regs[b, i] = ct - ct_int
batch_reg_masks[b, i] = 1
# hm = batch_hms[b, :, :, cls_id]
# hm = np.round(hm * 255).astype(np.uint8)
# hm = cv2.cvtColor(hm, cv2.COLOR_GRAY2BGR)
# hm_2 = batch_hms_2[b, :, :, cls_id]
# hm_2 = np.round(hm_2 * 255).astype(np.uint8)
# hm_2 = cv2.cvtColor(hm_2, cv2.COLOR_GRAY2BGR)
# cv2.rectangle(hm, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 1)
# cv2.rectangle(hm_2, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 1)
# cv2.namedWindow('hm', cv2.WINDOW_NORMAL)
# cv2.imshow('hm', np.hstack([hm, hm_2]))
# cv2.waitKey()
# print(np.sum(batch_reg_masks[b]))
# for i in range(self.num_classes()):
# plt.subplot(4, 5, i + 1)
# hm = batch_hms[b, :, :, i]
# plt.imshow(hm, cmap='gray')
# plt.axis('off')
# plt.show()
# hm = np.sum(batch_hms[0], axis=-1)
# hm = np.round(hm * 255).astype(np.uint8)
# hm = cv2.cvtColor(hm, cv2.COLOR_GRAY2BGR)
# hm_2 = np.sum(batch_hms_2[0], axis=-1)
# hm_2 = np.round(hm_2 * 255).astype(np.uint8)
# hm_2 = cv2.cvtColor(hm_2, cv2.COLOR_GRAY2BGR)
# for i in range(bboxes.shape[0]):
# x1, y1 = np.round(affine_transform(bboxes[i, :2], trans_input)).astype(np.int32)
# x2, y2 = np.round(affine_transform(bboxes[i, 2:], trans_input)).astype(np.int32)
# x1_, y1_ = np.round(affine_transform(bboxes[i, :2], trans_output)).astype(np.int32)
# x2_, y2_ = np.round(affine_transform(bboxes[i, 2:], trans_output)).astype(np.int32)
# class_id = class_ids[i]
# cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 1)
            # cv2.putText(image, str(class_id), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 0, 0), 3)
# cv2.rectangle(hm, (x1_, y1_), (x2_, y2_), (0, 255, 0), 1)
# cv2.rectangle(hm_2, (x1_, y1_), (x2_, y2_), (0, 255, 0), 1)
# cv2.namedWindow('hm', cv2.WINDOW_NORMAL)
# cv2.imshow('hm', np.hstack([hm, hm_2]))
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', image)
# cv2.waitKey()
return [batch_images, batch_hms_2, batch_whs, batch_regs, batch_reg_masks, batch_indices]
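    # Worked example of the target encoding above (hypothetical numbers): with input_size = 512 the heatmaps are
    # output_size = 128 wide, so a box whose centre falls at ct_int = (x=30, y=40) on the output grid is stored as
    # batch_indices[b, i] = 40 * 128 + 30 = 5150, batch_regs[b, i] holds the sub-pixel offset ct - ct_int, and
    # batch_whs[b, i] holds the box width and height measured on the output grid.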
def compute_targets(self, image_group, annotations_group):
"""
Compute target outputs for the network using images and their annotations.
"""
return np.zeros((len(image_group),))
def compute_inputs_targets(self, group):
"""
Compute inputs and target outputs for the network.
"""
# load images and annotations
# list
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly apply visual effect
image_group, annotations_group = self.random_visual_effect_group(image_group, annotations_group)
#
# # randomly transform data
# image_group, annotations_group = self.random_transform_group(image_group, annotations_group)
# randomly apply misc effect
image_group, annotations_group = self.random_misc_group(image_group, annotations_group)
#
# # perform preprocessing steps
# image_group, annotations_group = self.preprocess_group(image_group, annotations_group)
#
# # check validity of annotations
# image_group, annotations_group = self.clip_transformed_annotations(image_group, annotations_group, group)
if len(image_group) == 0:
return None, None
# compute network inputs
inputs = self.compute_inputs(image_group, annotations_group)
# compute network targets
targets = self.compute_targets(image_group, annotations_group)
return inputs, targets
def __len__(self):
"""
Number of batches for generator.
"""
return len(self.groups)
def __getitem__(self, index):
"""
Keras sequence method for generating batches.
"""
group = self.groups[self.current_index]
if self.multi_scale:
if self.current_index % 10 == 0:
random_size_index = np.random.randint(0, len(self.multi_image_sizes))
self.image_size = self.multi_image_sizes[random_size_index]
inputs, targets = self.compute_inputs_targets(group)
while inputs is None:
current_index = self.current_index + 1
if current_index >= len(self.groups):
current_index = current_index % (len(self.groups))
self.current_index = current_index
group = self.groups[self.current_index]
inputs, targets = self.compute_inputs_targets(group)
current_index = self.current_index + 1
if current_index >= len(self.groups):
current_index = current_index % (len(self.groups))
self.current_index = current_index
return inputs, targets
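# Minimal usage sketch (illustrative only; `DataGenerator` and its constructor arguments
# are assumptions, not defined here). A keras.utils.Sequence like this one is normally
# consumed directly by model.fit:
#
#     train_gen = DataGenerator(...)   # hypothetical instantiation
#     model.fit(train_gen, epochs=50)  # Keras drives __len__ / __getitem__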
def preprocess_image(self, image, c, s, tgt_w, tgt_h):
trans_input = get_affine_transform(c, s, (tgt_w, tgt_h))
image = cv2.warpAffine(image, trans_input, (tgt_w, tgt_h), flags=cv2.INTER_LINEAR)
image = image.astype(np.float32)
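# Subtract the Caffe-style ImageNet channel means (BGR order: 103.939, 116.779, 123.68);
# this assumes the backbone expects caffe-mode preprocessing.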
image[..., 0] -= 103.939
image[..., 1] -= 116.779
image[..., 2] -= 123.68
return image
def get_transformed_group(self, group):
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly transform data
image_group, annotations_group = self.random_transform_group(image_group, annotations_group)
return image_group, annotations_group
def get_cropped_and_rotated_group(self, group):
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly transform data
image_group, annotations_group = self.random_crop_group(image_group, annotations_group)
image_group, annotations_group = self.random_rotate_group(image_group, annotations_group)
return image_group, annotations_group
|
group_stat_vars_by_observation_properties
|
Groups stat vars by their observation schemas.
Groups Stat Vars by their inclusion of StatVar Observation
properties like measurementMethod or Unit.
The current template MCF schema does not support optional values in the
CSV so we must place these stat vars into
different template MCFs and CSVs.
Args:
indicator_codes: List of World Bank indicator codes with
their Data Commons mappings, as a pandas dataframe.
Returns:
Array of tuples for each statistical variable grouping.
1) template MCF, as a string.
2) columns to include in exported csv, as a list of strings.
3) indicator codes in this grouping, as a list of strings.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fetches, cleans, outputs TMCFs and CSVs for all World Bank development
indicator codes provided in WorldBankIndicators.csv for all years and for
all countries provided in WorldBankCountries.csv. """
from absl import app
import pandas as pd
import itertools
import requests
import zipfile
import io
import re
# Remaps the columns provided by World Bank API.
WORLDBANK_COL_REMAP = {
'Country Name': 'CountryName',
'Country Code': 'CountryCode',
'Indicator Name': 'IndicatorName',
'Indicator Code': 'IndicatorCode'
}
TEMPLATE_TMCF = """
Node: E:WorldBank->E0
typeOf: dcs:StatVarObservation
variableMeasured: C:WorldBank->StatisticalVariable
observationDate: C:WorldBank->Year
observationPeriod: "P1Y"
observationAbout: C:WorldBank->ISO3166Alpha3
value: C:WorldBank->Value
"""
TEMPLATE_STAT_VAR = """
Node: dcid:WorldBank/{INDICATOR}
name: "{NAME}"
description: "{DESCRIPTION}"
typeOf: dcs:StatisticalVariable
populationType: dcs:{populationType}
statType: dcs:{statType}
measuredProperty: dcs:{measuredProperty}
measurementDenominator: dcs:{measurementDenominator}
{CONSTRAINTS}
"""
def read_worldbank(iso3166alpha3):
""" Fetches and tidies all ~1500 World Bank indicators
for a given ISO 3166 alpha 3 code.
For a particular alpha 3 code, this function fetches the entire ZIP
file for that particular country for all World Bank indicators in a
wide format where years are columns. The dataframe is changed into a
narrow format so that year becomes a single column with each row
representing a different year for a single indicator.
Args:
iso3166alpha3: ISO 3166 alpha 3 for a country, as a string.
Returns:
A tidied pandas dataframe with all indicator codes for a particular
country in the format of (country, indicator, year, value).
Notes:
Takes approximately 10 seconds to download and
tidy one country in a Jupyter notebook.
"""
country_zip = ("http://api.worldbank.org/v2/en/country/" + iso3166alpha3 +
"?downloadformat=csv")
r = requests.get(country_zip)
filebytes = io.BytesIO(r.content)
myzipfile = zipfile.ZipFile(filebytes)
# We need to select the data file which starts with "API",
# but does not have an otherwise regular filename structure.
file_to_open = None
for file in myzipfile.namelist():
if file.startswith("API"):
file_to_open = file
break
assert file_to_open is not None, \
"Failed to find data for " + iso3166alpha3
df = None
# Captures any text contained in double quotations.
line_match = re.compile(r"\"([^\"]*)\"")
for line in myzipfile.open(file_to_open).readlines():
# Cells are contained in quotations and comma separated.
cols = line_match.findall(line.decode("utf-8"))
# CSVs include header informational lines which should be ignored.
if len(cols) > 2:
# Use first row as the header.
if df is None:
df = pd.DataFrame(columns=cols)
else:
df = df.append(pd.DataFrame([cols], columns=df.columns),
ignore_index=True)
df = df.rename(columns=WORLDBANK_COL_REMAP)
# Turn each year into its own row.
df = df.set_index(
['CountryName', 'CountryCode', 'IndicatorName', 'IndicatorCode'])
df = df.stack()
df.index = df.index.rename('year', level=4)
df.name = "Value"
df = df.reset_index()
# Convert to numeric and drop empty values.
df['Value'] = pd.to_numeric(df['Value'])
df = df.dropna()
return df
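# Illustrative usage (not part of the pipeline; the alpha-3 code is just an example):
#
#     usa_df = read_worldbank("USA")
#     print(usa_df.columns)  # CountryName, CountryCode, IndicatorName, IndicatorCode, year, Value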
def build_stat_vars_from_indicator_list(row):
""" Generates World Bank StatVar for a row in the indicators dataframe. """
def row_to_constraints(row):
""" Helper to generate list of constraints. """
constraints_text = ""
next_constraint = 1
while (f"p{next_constraint}" in row and
not pd.isna(row[f"p{next_constraint}"])):
variable = row[f'p{next_constraint}']
constraint = row[f'v{next_constraint}']
constraints_text += f"{variable}: dcs:{constraint}\n"
next_constraint += 1
return constraints_text
# yapf: disable
# Input all required statistical variable fields.
new_stat_var = (TEMPLATE_STAT_VAR
.replace("{INDICATOR}", row['IndicatorCode'].replace(".", "_"))
.replace("{NAME}", row['IndicatorName'])
.replace("{DESCRIPTION}", row['SourceNote'])
.replace("{measuredProperty}", row['measuredProp'])
.replace("{CONSTRAINTS}", row_to_constraints(row))
)
# yapf: enable
# Include or remove optional fields.
for optional_col in ([
'populationType', 'statType', 'measurementDenominator'
]):
if not pd.isna(row[optional_col]):
new_stat_var = new_stat_var.replace(f"{{{optional_col}}}",
row[optional_col])
else:
new_stat_var = new_stat_var.replace(
f"{optional_col}: dcs:{{{optional_col}}}\n", "")
return new_stat_var
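# Sketch of the expected output for a hypothetical indicator row (column names follow
# WorldBankIndicators.csv; the values below are made up for illustration):
#
#     row = {'IndicatorCode': 'SP.POP.TOTL', 'IndicatorName': 'Population, total',
#            'SourceNote': 'Total population.', 'measuredProp': 'count',
#            'populationType': 'Person', 'statType': 'measuredValue',
#            'measurementDenominator': float('nan'), 'p1': float('nan')}
#     build_stat_vars_from_indicator_list(row)
#     # -> "Node: dcid:WorldBank/SP_POP_TOTL ..." with the optional
#     #    measurementDenominator line dropped and no constraint lines.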
# MASKED: group_stat_vars_by_observation_properties function (lines 165-216)
def download_indicator_data(worldbank_countries, indicator_codes):
""" Downloads World Bank country data for all countries and
indicators provided.
Retains only the unique indicator codes provided.
Args:
worldbank_countries: Dataframe with ISO 3166 alpha 3 code for each
country.
indicator_codes: Dataframe with INDICATOR_CODES to include.
Returns:
worldbank_dataframe: A tidied pandas dataframe where each row has
the format (indicator code, ISO 3166 alpha 3, year, value)
for all countries and all indicators provided.
"""
worldbank_dataframe = pd.DataFrame()
indicators_to_keep = list(indicator_codes['IndicatorCode'].unique())
for index, country_code in enumerate(worldbank_countries['ISO3166Alpha3']):
print(f"Downloading {country_code}")
country_df = read_worldbank(country_code)
# Remove unnecessary indicators.
country_df = country_df[country_df['IndicatorCode'].isin(
indicators_to_keep)]
# Map country codes to ISO.
country_df['ISO3166Alpha3'] = country_code
# Add new rows to the main dataframe.
worldbank_dataframe = worldbank_dataframe.append(country_df)
# Map indicator codes to unique Statistical Variable.
worldbank_dataframe['StatisticalVariable'] = (
worldbank_dataframe['IndicatorCode'].apply(
lambda code: f"WorldBank/{code.replace('.', '_')}"))
return worldbank_dataframe.rename({'year': 'Year'}, axis=1)
def output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
indicator_codes):
""" Outputs TMCFs and CSVs for each grouping of stat vars.
Args:
worldbank_dataframe: Dataframe containing all indicators for all
countries.
tmcfs_for_stat_vars: Array of tuples of template MCF,
columns on stat var observations,
indicator codes for that template.
indicator_codes: Dataframe with INDICATOR_CODES to include.
"""
# Only include a subset of columns in the final csv
output_csv = worldbank_dataframe[[
'StatisticalVariable', 'IndicatorCode', 'ISO3166Alpha3', 'Year', 'Value'
]]
# Output tmcf and csv for each unique World Bank grouping.
for index, enum in enumerate(tmcfs_for_stat_vars):
tmcf, stat_var_obs_cols, stat_vars_in_group = enum
if len(stat_vars_in_group) != 0:
with open(f"output/WorldBank_{index}.tmcf", 'w',
newline='') as f_out:
f_out.write(tmcf)
# Output only the indicator codes in that grouping.
matching_csv = output_csv[output_csv['IndicatorCode'].isin(
stat_vars_in_group)]
# Include the Stat Observation columns in the output CSV.
if len(stat_var_obs_cols) > 1:
matching_csv = pd.merge(matching_csv,
indicator_codes[stat_var_obs_cols],
on="IndicatorCode")
# Format to decimals.
matching_csv = matching_csv.round(10)
matching_csv.drop("IndicatorCode",
axis=1).to_csv(f"output/WorldBank_{index}.csv",
float_format='%.10f',
index=False)
def source_scaling_remap(row, scaling_factor_lookup, existing_stat_var_lookup):
""" Scales values by sourceScalingFactor and inputs exisiting stat vars.
First, this function converts all values to per capita. Some measures
in the World Bank dataset are per thousand or per hundred thousand, but
we need to scale these to the common denomination format. Secondly,
some statistical variables such as Count_Person_InLaborForce are not
World Bank specific and need to be replaced. Both of these are imputted
from the following two lists in args.
Args:
scaling_factor_lookup: A dictionary of a mapping between World Bank
indicator code to the respective numeric scaling factor.
existing_stat_var_lookup: A dictionary of a mapping between all
indicator to be replaced with the exisiting stat var to replace it.
"""
indicator_code = row['IndicatorCode']
if indicator_code in scaling_factor_lookup:
row['Value'] = (row['Value'] /
int(scaling_factor_lookup[indicator_code]))
if indicator_code in existing_stat_var_lookup:
row['StatisticalVariable'] = ("dcid:" +
existing_stat_var_lookup[indicator_code])
return row
def main(_):
# Load statistical variable configuration file.
indicator_codes = pd.read_csv("WorldBankIndicators.csv")
# Add source description to note.
def add_source_to_description(row):
if not pd.isna(row['Source']):
return row['SourceNote'] + " " + str(row['Source'])
else:
return row['SourceNote']
indicator_codes['SourceNote'] = indicator_codes.apply(
add_source_to_description, axis=1)
# Generate stat vars
with open("output/WorldBank_StatisticalVariables.mcf", "w+") as f_out:
# Generate StatVars for fields that don't exist. Some fields such as
# Count_Person_Unemployed are already statistical variables so we do
# not need to recreate them.
for _, row in indicator_codes[
indicator_codes['ExistingStatVar'].isna()].iterrows():
f_out.write(build_stat_vars_from_indicator_list(row))
# Create template MCFs for each grouping of stat vars.
tmcfs_for_stat_vars = (
group_stat_vars_by_observation_properties(indicator_codes))
# Download data for all countries.
worldbank_countries = pd.read_csv("WorldBankCountries.csv")
worldbank_dataframe = download_indicator_data(worldbank_countries,
indicator_codes)
# Remap columns to match expected format.
worldbank_dataframe['Value'] = pd.to_numeric(worldbank_dataframe['Value'])
worldbank_dataframe['ISO3166Alpha3'] = (
worldbank_dataframe['ISO3166Alpha3'].apply(
lambda code: "dcs:country/" + code))
worldbank_dataframe['StatisticalVariable'] = \
worldbank_dataframe['StatisticalVariable'].apply(
lambda code: "dcs:" + code)
# Scale values by scaling factor and replace existing StatVars.
scaling_factor_lookup = (indicator_codes.set_index("IndicatorCode")
['sourceScalingFactor'].dropna().to_dict())
existing_stat_var_lookup = (indicator_codes.set_index("IndicatorCode")
['ExistingStatVar'].dropna().to_dict())
worldbank_dataframe = worldbank_dataframe.apply(
lambda row: source_scaling_remap(row, scaling_factor_lookup,
existing_stat_var_lookup),
axis=1)
# Convert integer columns.
int_cols = (list(indicator_codes[indicator_codes['ConvertToInt'] == True]
['IndicatorCode'].unique()))
worldbank_subset = worldbank_dataframe[
worldbank_dataframe['IndicatorCode'].isin(int_cols)].index
worldbank_dataframe.loc[worldbank_subset, "Value"] = (pd.to_numeric(
worldbank_dataframe.loc[worldbank_subset, "Value"], downcast="integer"))
# Output final CSVs and variables.
output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
indicator_codes)
if __name__ == '__main__':
app.run(main)
|
def group_stat_vars_by_observation_properties(indicator_codes):
""" Groups stat vars by their observation schemas.
Groups Stat Vars by their inclusion of StatVar Observation
properties like measurementMethod or Unit.
The current template MCF schema does not support optional values in the
CSV so we must place these stat vars into
different template MCFs and CSVs.
Args:
indicator_codes: List of World Bank indicator codes with
their Data Commons mappings, as a pandas dataframe.
Returns:
Array of tuples for each statistical variable grouping.
1) template MCF, as a string.
2) columns to include in exported csv, as a list of strings.
3) indicator codes in this grouping, as a list of strings.
"""
# All the statistical observation properties that we included.
properties_of_stat_var_observation = ([
'measurementMethod', 'scalingFactor', 'sourceScalingFactor', 'unit'
])
# List of tuples to return.
tmcfs_for_stat_vars = []
# Dataframe that tracks which values are null.
null_status = indicator_codes.notna()
# Iterates over all permutations of stat var properties being included.
for permutation in list(
itertools.product([False, True],
repeat=len(properties_of_stat_var_observation))):
codes_that_match = null_status.copy()
base_template_mcf = TEMPLATE_TMCF
cols_to_include_in_csv = ['IndicatorCode']
# Loop over each obs column and whether to include it.
for include_col, column in (zip(permutation,
properties_of_stat_var_observation)):
# Filter the dataframe by this observation.
codes_that_match = codes_that_match.query(
f"{column} == {include_col}")
# Include the column in TMCF and column list.
if include_col:
base_template_mcf += f"{column}: C:WorldBank->{column}\n"
cols_to_include_in_csv.append(f"{column}")
tmcfs_for_stat_vars.append(
(base_template_mcf, cols_to_include_in_csv,
list(
indicator_codes.loc[codes_that_match.index]['IndicatorCode'])))
return tmcfs_for_stat_vars
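# For example: with the four observation properties above, the loop yields
# 2**4 = 16 groupings. An indicator that only defines `unit` falls into the grouping
# whose TMCF gains a "unit: C:WorldBank->unit" line and whose CSV columns are
# ['IndicatorCode', 'unit'].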
| 165 | 216 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fetches, cleans, outputs TMCFs and CSVs for all World Bank development
indicator codes provided in WorldBankIndicators.csv for all years and for
all countries provided in WorldBankCountries.csv. """
from absl import app
import pandas as pd
import itertools
import requests
import zipfile
import io
import re
# Remaps the columns provided by World Bank API.
WORLDBANK_COL_REMAP = {
'Country Name': 'CountryName',
'Country Code': 'CountryCode',
'Indicator Name': 'IndicatorName',
'Indicator Code': 'IndicatorCode'
}
TEMPLATE_TMCF = """
Node: E:WorldBank->E0
typeOf: dcs:StatVarObservation
variableMeasured: C:WorldBank->StatisticalVariable
observationDate: C:WorldBank->Year
observationPeriod: "P1Y"
observationAbout: C:WorldBank->ISO3166Alpha3
value: C:WorldBank->Value
"""
TEMPLATE_STAT_VAR = """
Node: dcid:WorldBank/{INDICATOR}
name: "{NAME}"
description: "{DESCRIPTION}"
typeOf: dcs:StatisticalVariable
populationType: dcs:{populationType}
statType: dcs:{statType}
measuredProperty: dcs:{measuredProperty}
measurementDenominator: dcs:{measurementDenominator}
{CONSTRAINTS}
"""
def read_worldbank(iso3166alpha3):
""" Fetches and tidies all ~1500 World Bank indicators
for a given ISO 3166 alpha 3 code.
For a particular alpha 3 code, this function fetches the entire ZIP
file for that particular country for all World Bank indicators in a
wide format where years are columns. The dataframe is changed into a
narrow format so that year becomes a single column with each row
representing a different year for a single indicator.
Args:
iso3166alpha3: ISO 3166 alpha 3 for a country, as a string.
Returns:
A tidied pandas dataframe with all indicator codes for a particular
country in the format of (country, indicator, year, value).
Notes:
Takes approximately 10 seconds to download and
tidy one country in a Jupyter notebook.
"""
country_zip = ("http://api.worldbank.org/v2/en/country/" + iso3166alpha3 +
"?downloadformat=csv")
r = requests.get(country_zip)
filebytes = io.BytesIO(r.content)
myzipfile = zipfile.ZipFile(filebytes)
# We need to select the data file which starts with "API",
# but does not have an otherwise regular filename structure.
file_to_open = None
for file in myzipfile.namelist():
if file.startswith("API"):
file_to_open = file
break
assert file_to_open is not None, \
"Failed to find data for " + iso3166alpha3
df = None
# Captures any text contained in double quotations.
line_match = re.compile(r"\"([^\"]*)\"")
for line in myzipfile.open(file_to_open).readlines():
# Cells are contained in quotations and comma separated.
cols = line_match.findall(line.decode("utf-8"))
# CSVs include header informational lines which should be ignored.
if len(cols) > 2:
# Use first row as the header.
if df is None:
df = pd.DataFrame(columns=cols)
else:
df = df.append(pd.DataFrame([cols], columns=df.columns),
ignore_index=True)
df = df.rename(columns=WORLDBANK_COL_REMAP)
# Turn each year into its own row.
df = df.set_index(
['CountryName', 'CountryCode', 'IndicatorName', 'IndicatorCode'])
df = df.stack()
df.index = df.index.rename('year', level=4)
df.name = "Value"
df = df.reset_index()
# Convert to numeric and drop empty values.
df['Value'] = pd.to_numeric(df['Value'])
df = df.dropna()
return df
def build_stat_vars_from_indicator_list(row):
""" Generates World Bank StatVar for a row in the indicators dataframe. """
def row_to_constraints(row):
""" Helper to generate list of constraints. """
constraints_text = ""
next_constraint = 1
while (f"p{next_constraint}" in row and
not pd.isna(row[f"p{next_constraint}"])):
variable = row[f'p{next_constraint}']
constraint = row[f'v{next_constraint}']
constraints_text += f"{variable}: dcs:{constraint}\n"
next_constraint += 1
return constraints_text
# yapf: disable
# Input all required statistical variable fields.
new_stat_var = (TEMPLATE_STAT_VAR
.replace("{INDICATOR}", row['IndicatorCode'].replace(".", "_"))
.replace("{NAME}", row['IndicatorName'])
.replace("{DESCRIPTION}", row['SourceNote'])
.replace("{measuredProperty}", row['measuredProp'])
.replace("{CONSTRAINTS}", row_to_constraints(row))
)
# yapf: enable
# Include or remove optional fields.
for optional_col in ([
'populationType', 'statType', 'measurementDenominator'
]):
if not pd.isna(row[optional_col]):
new_stat_var = new_stat_var.replace(f"{{{optional_col}}}",
row[optional_col])
else:
new_stat_var = new_stat_var.replace(
f"{optional_col}: dcs:{{{optional_col}}}\n", "")
return new_stat_var
def group_stat_vars_by_observation_properties(indicator_codes):
""" Groups stat vars by their observation schemas.
Groups Stat Vars by their inclusion of StatVar Observation
properties like measurementMethod or Unit.
The current template MCF schema does not support optional values in the
CSV so we must place these stat vars into
different template MCFs and CSVs.
Args:
indicator_codes: List of World Bank indicator codes with
their Data Commons mappings, as a pandas dataframe.
Returns:
Array of tuples for each statistical variable grouping.
1) template MCF, as a string.
2) columns to include in exported csv, as a list of strings.
3) indicator codes in this grouping, as a list of strings.
"""
# All the statistical observation properties that we included.
properties_of_stat_var_observation = ([
'measurementMethod', 'scalingFactor', 'sourceScalingFactor', 'unit'
])
# List of tuples to return.
tmcfs_for_stat_vars = []
# Dataframe that tracks which values are null.
null_status = indicator_codes.notna()
# Iterates over all permutations of stat var properties being included.
for permutation in list(
itertools.product([False, True],
repeat=len(properties_of_stat_var_observation))):
codes_that_match = null_status.copy()
base_template_mcf = TEMPLATE_TMCF
cols_to_include_in_csv = ['IndicatorCode']
# Loop over each obs column and whether to include it.
for include_col, column in (zip(permutation,
properties_of_stat_var_observation)):
# Filter the dataframe by this observation.
codes_that_match = codes_that_match.query(
f"{column} == {include_col}")
# Include the column in TMCF and column list.
if include_col:
base_template_mcf += f"{column}: C:WorldBank->{column}\n"
cols_to_include_in_csv.append(f"{column}")
tmcfs_for_stat_vars.append(
(base_template_mcf, cols_to_include_in_csv,
list(
indicator_codes.loc[codes_that_match.index]['IndicatorCode'])))
return tmcfs_for_stat_vars
def download_indicator_data(worldbank_countries, indicator_codes):
""" Downloads World Bank country data for all countries and
indicators provided.
Retains only the unique indicator codes provided.
Args:
worldbank_countries: Dataframe with ISO 3166 alpha 3 code for each
country.
indicator_codes: Dataframe with INDICATOR_CODES to include.
Returns:
worldbank_dataframe: A tidied pandas dataframe where each row has
the format (indicator code, ISO 3166 alpha 3, year, value)
for all countries and all indicators provided.
"""
worldbank_dataframe = pd.DataFrame()
indicators_to_keep = list(indicator_codes['IndicatorCode'].unique())
for index, country_code in enumerate(worldbank_countries['ISO3166Alpha3']):
print(f"Downloading {country_code}")
country_df = read_worldbank(country_code)
# Remove unnecessary indicators.
country_df = country_df[country_df['IndicatorCode'].isin(
indicators_to_keep)]
# Map country codes to ISO.
country_df['ISO3166Alpha3'] = country_code
# Add new rows to the main dataframe.
worldbank_dataframe = worldbank_dataframe.append(country_df)
# Map indicator codes to unique Statistical Variable.
worldbank_dataframe['StatisticalVariable'] = (
worldbank_dataframe['IndicatorCode'].apply(
lambda code: f"WorldBank/{code.replace('.', '_')}"))
return worldbank_dataframe.rename({'year': 'Year'}, axis=1)
def output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
indicator_codes):
""" Outputs TMCFs and CSVs for each grouping of stat vars.
Args:
worldbank_dataframe: Dataframe containing all indicators for all
countries.
tmcfs_for_stat_vars: Array of tuples of template MCF,
columns on stat var observations,
indicator codes for that template.
indicator_codes: Dataframe with INDICATOR_CODES to include.
"""
# Only include a subset of columns in the final csv
output_csv = worldbank_dataframe[[
'StatisticalVariable', 'IndicatorCode', 'ISO3166Alpha3', 'Year', 'Value'
]]
# Output tmcf and csv for each unique World Bank grouping.
for index, enum in enumerate(tmcfs_for_stat_vars):
tmcf, stat_var_obs_cols, stat_vars_in_group = enum
if len(stat_vars_in_group) != 0:
with open(f"output/WorldBank_{index}.tmcf", 'w',
newline='') as f_out:
f_out.write(tmcf)
# Output only the indicator codes in that grouping.
matching_csv = output_csv[output_csv['IndicatorCode'].isin(
stat_vars_in_group)]
# Include the Stat Observation columns in the output CSV.
if len(stat_var_obs_cols) > 1:
matching_csv = pd.merge(matching_csv,
indicator_codes[stat_var_obs_cols],
on="IndicatorCode")
# Format to decimals.
matching_csv = matching_csv.round(10)
matching_csv.drop("IndicatorCode",
axis=1).to_csv(f"output/WorldBank_{index}.csv",
float_format='%.10f',
index=False)
def source_scaling_remap(row, scaling_factor_lookup, existing_stat_var_lookup):
""" Scales values by sourceScalingFactor and inputs exisiting stat vars.
First, this function converts all values to per capita. Some measures
in the World Bank dataset are per thousand or per hundred thousand, but
we need to scale these to the common denomination format. Secondly,
some statistical variables such as Count_Person_InLaborForce are not
World Bank specific and need to be replaced. Both of these are imputted
from the following two lists in args.
Args:
scaling_factor_lookup: A dictionary of a mapping between World Bank
indicator code to the respective numeric scaling factor.
existing_stat_var_lookup: A dictionary of a mapping between all
indicator to be replaced with the exisiting stat var to replace it.
"""
indicator_code = row['IndicatorCode']
if indicator_code in scaling_factor_lookup:
row['Value'] = (row['Value'] /
int(scaling_factor_lookup[indicator_code]))
if indicator_code in existing_stat_var_lookup:
row['StatisticalVariable'] = ("dcid:" +
existing_stat_var_lookup[indicator_code])
return row
def main(_):
# Load statistical variable configuration file.
indicator_codes = pd.read_csv("WorldBankIndicators.csv")
# Add source description to note.
def add_source_to_description(row):
if not pd.isna(row['Source']):
return row['SourceNote'] + " " + str(row['Source'])
else:
return row['SourceNote']
indicator_codes['SourceNote'] = indicator_codes.apply(
add_source_to_description, axis=1)
# Generate stat vars
with open("output/WorldBank_StatisticalVariables.mcf", "w+") as f_out:
# Generate StatVars for fields that don't exist. Some fields such as
# Count_Person_Unemployed are already statistical variables so we do
# not need to recreate them.
for _, row in indicator_codes[
indicator_codes['ExistingStatVar'].isna()].iterrows():
f_out.write(build_stat_vars_from_indicator_list(row))
# Create template MCFs for each grouping of stat vars.
tmcfs_for_stat_vars = (
group_stat_vars_by_observation_properties(indicator_codes))
# Download data for all countries.
worldbank_countries = pd.read_csv("WorldBankCountries.csv")
worldbank_dataframe = download_indicator_data(worldbank_countries,
indicator_codes)
# Remap columns to match expected format.
worldbank_dataframe['Value'] = pd.to_numeric(worldbank_dataframe['Value'])
worldbank_dataframe['ISO3166Alpha3'] = (
worldbank_dataframe['ISO3166Alpha3'].apply(
lambda code: "dcs:country/" + code))
worldbank_dataframe['StatisticalVariable'] = \
worldbank_dataframe['StatisticalVariable'].apply(
lambda code: "dcs:" + code)
# Scale values by scaling factor and replace existing StatVars.
scaling_factor_lookup = (indicator_codes.set_index("IndicatorCode")
['sourceScalingFactor'].dropna().to_dict())
existing_stat_var_lookup = (indicator_codes.set_index("IndicatorCode")
['ExistingStatVar'].dropna().to_dict())
worldbank_dataframe = worldbank_dataframe.apply(
lambda row: source_scaling_remap(row, scaling_factor_lookup,
existing_stat_var_lookup),
axis=1)
# Convert integer columns.
int_cols = (list(indicator_codes[indicator_codes['ConvertToInt'] == True]
['IndicatorCode'].unique()))
worldbank_subset = worldbank_dataframe[
worldbank_dataframe['IndicatorCode'].isin(int_cols)].index
worldbank_dataframe.loc[worldbank_subset, "Value"] = (pd.to_numeric(
worldbank_dataframe.loc[worldbank_subset, "Value"], downcast="integer"))
# Output final CSVs and variables.
output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
indicator_codes)
if __name__ == '__main__':
app.run(main)
|
output_csv_and_tmcf_by_grouping
|
Outputs TMCFs and CSVs for each grouping of stat vars.
Args:
worldbank_dataframe: Dataframe containing all indicators for all
countries.
tmcfs_for_stat_vars: Array of tuples of template MCF,
columns on stat var observations,
indicator codes for that template.
indicator_codes: Dataframe with INDICATOR_CODES to include.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fetches, cleans, outputs TMCFs and CSVs for all World Bank development
indicator codes provided in WorldBankIndicators.csv for all years and for
all countries provided in WorldBankCountries.csv. """
from absl import app
import pandas as pd
import itertools
import requests
import zipfile
import io
import re
# Remaps the columns provided by World Bank API.
WORLDBANK_COL_REMAP = {
'Country Name': 'CountryName',
'Country Code': 'CountryCode',
'Indicator Name': 'IndicatorName',
'Indicator Code': 'IndicatorCode'
}
TEMPLATE_TMCF = """
Node: E:WorldBank->E0
typeOf: dcs:StatVarObservation
variableMeasured: C:WorldBank->StatisticalVariable
observationDate: C:WorldBank->Year
observationPeriod: "P1Y"
observationAbout: C:WorldBank->ISO3166Alpha3
value: C:WorldBank->Value
"""
TEMPLATE_STAT_VAR = """
Node: dcid:WorldBank/{INDICATOR}
name: "{NAME}"
description: "{DESCRIPTION}"
typeOf: dcs:StatisticalVariable
populationType: dcs:{populationType}
statType: dcs:{statType}
measuredProperty: dcs:{measuredProperty}
measurementDenominator: dcs:{measurementDenominator}
{CONSTRAINTS}
"""
def read_worldbank(iso3166alpha3):
""" Fetches and tidies all ~1500 World Bank indicators
for a given ISO 3166 alpha 3 code.
For a particular alpha 3 code, this function fetches the entire ZIP
file for that particular country for all World Bank indicators in a
wide format where years are columns. The dataframe is changed into a
narrow format so that year becomes a single column with each row
representing a different year for a single indicator.
Args:
iso3166alpha3: ISO 3166 alpha 3 for a country, as a string.
Returns:
A tidied pandas dataframe with all indicator codes for a particular
country in the format of (country, indicator, year, value).
Notes:
Takes approximately 10 seconds to download and
tidy one country in a Jupyter notebook.
"""
country_zip = ("http://api.worldbank.org/v2/en/country/" + iso3166alpha3 +
"?downloadformat=csv")
r = requests.get(country_zip)
filebytes = io.BytesIO(r.content)
myzipfile = zipfile.ZipFile(filebytes)
# We need to select the data file which starts with "API",
# but does not have an otherwise regular filename structure.
file_to_open = None
for file in myzipfile.namelist():
if file.startswith("API"):
file_to_open = file
break
assert file_to_open is not None, \
"Failed to find data for " + iso3166alpha3
df = None
# Captures any text contained in double quotations.
line_match = re.compile(r"\"([^\"]*)\"")
for line in myzipfile.open(file_to_open).readlines():
# Cells are contained in quotations and comma separated.
cols = line_match.findall(line.decode("utf-8"))
# CSVs include header informational lines which should be ignored.
if len(cols) > 2:
# Use first row as the header.
if df is None:
df = pd.DataFrame(columns=cols)
else:
df = df.append(pd.DataFrame([cols], columns=df.columns),
ignore_index=True)
df = df.rename(columns=WORLDBANK_COL_REMAP)
# Turn each year into its own row.
df = df.set_index(
['CountryName', 'CountryCode', 'IndicatorName', 'IndicatorCode'])
df = df.stack()
df.index = df.index.rename('year', level=4)
df.name = "Value"
df = df.reset_index()
# Convert to numeric and drop empty values.
df['Value'] = pd.to_numeric(df['Value'])
df = df.dropna()
return df
def build_stat_vars_from_indicator_list(row):
""" Generates World Bank StatVar for a row in the indicators dataframe. """
def row_to_constraints(row):
""" Helper to generate list of constraints. """
constraints_text = ""
next_constraint = 1
while (f"p{next_constraint}" in row and
not pd.isna(row[f"p{next_constraint}"])):
variable = row[f'p{next_constraint}']
constraint = row[f'v{next_constraint}']
constraints_text += f"{variable}: dcs:{constraint}\n"
next_constraint += 1
return constraints_text
# yapf: disable
# Input all required statistical variable fields.
new_stat_var = (TEMPLATE_STAT_VAR
.replace("{INDICATOR}", row['IndicatorCode'].replace(".", "_"))
.replace("{NAME}", row['IndicatorName'])
.replace("{DESCRIPTION}", row['SourceNote'])
.replace("{measuredProperty}", row['measuredProp'])
.replace("{CONSTRAINTS}", row_to_constraints(row))
)
# yapf: enable
# Include or remove optional fields.
for optional_col in ([
'populationType', 'statType', 'measurementDenominator'
]):
if not pd.isna(row[optional_col]):
new_stat_var = new_stat_var.replace(f"{{{optional_col}}}",
row[optional_col])
else:
new_stat_var = new_stat_var.replace(
f"{optional_col}: dcs:{{{optional_col}}}\n", "")
return new_stat_var
def group_stat_vars_by_observation_properties(indicator_codes):
""" Groups stat vars by their observation schemas.
Groups Stat Vars by their inclusion of StatVar Observation
properties like measurementMethod or Unit.
The current template MCF schema does not support optional values in the
CSV so we must place these stat vars into
different template MCFs and CSVs.
Args:
indicator_codes: List of World Bank indicator codes with
their Data Commons mappings, as a pandas dataframe.
Returns:
Array of tuples for each statistical variable grouping.
1) template MCF, as a string.
2) columns to include in exported csv, as a list of strings.
3) indicator codes in this grouping, as a list of strings.
"""
# All the statistical observation properties that we included.
properties_of_stat_var_observation = ([
'measurementMethod', 'scalingFactor', 'sourceScalingFactor', 'unit'
])
# List of tuples to return.
tmcfs_for_stat_vars = []
# Dataframe that tracks which values are null.
null_status = indicator_codes.notna()
# Iterates over all permutations of stat var properties being included.
for permutation in list(
itertools.product([False, True],
repeat=len(properties_of_stat_var_observation))):
codes_that_match = null_status.copy()
base_template_mcf = TEMPLATE_TMCF
cols_to_include_in_csv = ['IndicatorCode']
# Loop over each obs column and whether to include it.
for include_col, column in (zip(permutation,
properties_of_stat_var_observation)):
# Filter the dataframe by this observation.
codes_that_match = codes_that_match.query(
f"{column} == {include_col}")
# Include the column in TMCF and column list.
if include_col:
base_template_mcf += f"{column}: C:WorldBank->{column}\n"
cols_to_include_in_csv.append(f"{column}")
tmcfs_for_stat_vars.append(
(base_template_mcf, cols_to_include_in_csv,
list(
indicator_codes.loc[codes_that_match.index]['IndicatorCode'])))
return tmcfs_for_stat_vars
def download_indicator_data(worldbank_countries, indicator_codes):
""" Downloads World Bank country data for all countries and
indicators provided.
Retains only the unique indicator codes provided.
Args:
worldbank_countries: Dataframe with ISO 3166 alpha 3 code for each
country.
indicator_codes: Dataframe with INDICATOR_CODES to include.
Returns:
worldbank_dataframe: A tidied pandas dataframe where each row has
the format (indicator code, ISO 3166 alpha 3, year, value)
for all countries and all indicators provided.
"""
worldbank_dataframe = pd.DataFrame()
indicators_to_keep = list(indicator_codes['IndicatorCode'].unique())
for index, country_code in enumerate(worldbank_countries['ISO3166Alpha3']):
print(f"Downloading {country_code}")
country_df = read_worldbank(country_code)
# Remove unnecessary indicators.
country_df = country_df[country_df['IndicatorCode'].isin(
indicators_to_keep)]
# Map country codes to ISO.
country_df['ISO3166Alpha3'] = country_code
# Add new rows to the main dataframe.
worldbank_dataframe = worldbank_dataframe.append(country_df)
# Map indicator codes to unique Statistical Variable.
worldbank_dataframe['StatisticalVariable'] = (
worldbank_dataframe['IndicatorCode'].apply(
lambda code: f"WorldBank/{code.replace('.', '_')}"))
return worldbank_dataframe.rename({'year': 'Year'}, axis=1)
# MASKED: output_csv_and_tmcf_by_grouping function (lines 259-299)
def source_scaling_remap(row, scaling_factor_lookup, existing_stat_var_lookup):
""" Scales values by sourceScalingFactor and inputs exisiting stat vars.
First, this function converts all values to per capita. Some measures
in the World Bank dataset are per thousand or per hundred thousand, but
we need to scale these to the common denomination format. Secondly,
some statistical variables such as Count_Person_InLaborForce are not
World Bank specific and need to be replaced. Both of these are imputted
from the following two lists in args.
Args:
scaling_factor_lookup: A dictionary of a mapping between World Bank
indicator code to the respective numeric scaling factor.
existing_stat_var_lookup: A dictionary of a mapping between all
indicator to be replaced with the exisiting stat var to replace it.
"""
indicator_code = row['IndicatorCode']
if indicator_code in scaling_factor_lookup:
row['Value'] = (row['Value'] /
int(scaling_factor_lookup[indicator_code]))
if indicator_code in existing_stat_var_lookup:
row['StatisticalVariable'] = ("dcid:" +
existing_stat_var_lookup[indicator_code])
return row
def main(_):
# Load statistical variable configuration file.
indicator_codes = pd.read_csv("WorldBankIndicators.csv")
# Add source description to note.
def add_source_to_description(row):
if not pd.isna(row['Source']):
return row['SourceNote'] + " " + str(row['Source'])
else:
return row['SourceNote']
indicator_codes['SourceNote'] = indicator_codes.apply(
add_source_to_description, axis=1)
# Generate stat vars
with open("output/WorldBank_StatisticalVariables.mcf", "w+") as f_out:
# Generate StatVars for fields that don't exist. Some fields such as
# Count_Person_Unemployed are already statistical variables so we do
# not need to recreate them.
for _, row in indicator_codes[
indicator_codes['ExistingStatVar'].isna()].iterrows():
f_out.write(build_stat_vars_from_indicator_list(row))
# Create template MCFs for each grouping of stat vars.
tmcfs_for_stat_vars = (
group_stat_vars_by_observation_properties(indicator_codes))
# Download data for all countries.
worldbank_countries = pd.read_csv("WorldBankCountries.csv")
worldbank_dataframe = download_indicator_data(worldbank_countries,
indicator_codes)
# Remap columns to match expected format.
worldbank_dataframe['Value'] = pd.to_numeric(worldbank_dataframe['Value'])
worldbank_dataframe['ISO3166Alpha3'] = (
worldbank_dataframe['ISO3166Alpha3'].apply(
lambda code: "dcs:country/" + code))
worldbank_dataframe['StatisticalVariable'] = \
worldbank_dataframe['StatisticalVariable'].apply(
lambda code: "dcs:" + code)
# Scale values by scaling factor and replace existing StatVars.
scaling_factor_lookup = (indicator_codes.set_index("IndicatorCode")
['sourceScalingFactor'].dropna().to_dict())
existing_stat_var_lookup = (indicator_codes.set_index("IndicatorCode")
['ExistingStatVar'].dropna().to_dict())
worldbank_dataframe = worldbank_dataframe.apply(
lambda row: source_scaling_remap(row, scaling_factor_lookup,
existing_stat_var_lookup),
axis=1)
# Convert integer columns.
int_cols = (list(indicator_codes[indicator_codes['ConvertToInt'] == True]
['IndicatorCode'].unique()))
worldbank_subset = worldbank_dataframe[
worldbank_dataframe['IndicatorCode'].isin(int_cols)].index
worldbank_dataframe.loc[worldbank_subset, "Value"] = (pd.to_numeric(
worldbank_dataframe.loc[worldbank_subset, "Value"], downcast="integer"))
# Output final CSVs and variables.
output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
indicator_codes)
if __name__ == '__main__':
app.run(main)
|
def output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
indicator_codes):
""" Outputs TMCFs and CSVs for each grouping of stat vars.
Args:
worldbank_dataframe: Dataframe containing all indicators for all
countries.
tmcfs_for_stat_vars: Array of tuples of template MCF,
columns on stat var observations,
indicator codes for that template.
indicator_codes: Dataframe with INDICATOR_CODES to include.
"""
# Only include a subset of columns in the final csv
output_csv = worldbank_dataframe[[
'StatisticalVariable', 'IndicatorCode', 'ISO3166Alpha3', 'Year', 'Value'
]]
# Output tmcf and csv for each unique World Bank grouping.
for index, enum in enumerate(tmcfs_for_stat_vars):
tmcf, stat_var_obs_cols, stat_vars_in_group = enum
if len(stat_vars_in_group) != 0:
with open(f"output/WorldBank_{index}.tmcf", 'w',
newline='') as f_out:
f_out.write(tmcf)
# Output only the indicator codes in that grouping.
matching_csv = output_csv[output_csv['IndicatorCode'].isin(
stat_vars_in_group)]
# Include the Stat Observation columns in the output CSV.
if len(stat_var_obs_cols) > 1:
matching_csv = pd.merge(matching_csv,
indicator_codes[stat_var_obs_cols],
on="IndicatorCode")
# Format to decimals.
matching_csv = matching_csv.round(10)
matching_csv.drop("IndicatorCode",
axis=1).to_csv(f"output/WorldBank_{index}.csv",
float_format='%.10f',
index=False)
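# Each non-empty grouping therefore yields a matched pair of files, e.g.
# output/WorldBank_3.tmcf and output/WorldBank_3.csv (the index is simply the position
# of the grouping), where the CSV keeps the common observation columns plus only the
# stat var observation properties referenced by that TMCF.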
| 259 | 299 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fetches, cleans, outputs TMCFs and CSVs for all World Bank development
indicator codes provided in WorldBankIndicators.csv for all years and for
all countries provided in WorldBankCountries.csv. """
from absl import app
import pandas as pd
import itertools
import requests
import zipfile
import io
import re
# Remaps the columns provided by World Bank API.
WORLDBANK_COL_REMAP = {
'Country Name': 'CountryName',
'Country Code': 'CountryCode',
'Indicator Name': 'IndicatorName',
'Indicator Code': 'IndicatorCode'
}
TEMPLATE_TMCF = """
Node: E:WorldBank->E0
typeOf: dcs:StatVarObservation
variableMeasured: C:WorldBank->StatisticalVariable
observationDate: C:WorldBank->Year
observationPeriod: "P1Y"
observationAbout: C:WorldBank->ISO3166Alpha3
value: C:WorldBank->Value
"""
TEMPLATE_STAT_VAR = """
Node: dcid:WorldBank/{INDICATOR}
name: "{NAME}"
description: "{DESCRIPTION}"
typeOf: dcs:StatisticalVariable
populationType: dcs:{populationType}
statType: dcs:{statType}
measuredProperty: dcs:{measuredProperty}
measurementDenominator: dcs:{measurementDenominator}
{CONSTRAINTS}
"""
def read_worldbank(iso3166alpha3):
""" Fetches and tidies all ~1500 World Bank indicators
for a given ISO 3166 alpha 3 code.
For a particular alpha 3 code, this function fetches the entire ZIP
file for that particular country for all World Bank indicators in a
wide format where years are columns. The dataframe is changed into a
narrow format so that year becomes a single column with each row
representing a different year for a single indicator.
Args:
iso3166alpha3: ISO 3166 alpha 3 for a country, as a string.
Returns:
A tidied pandas dataframe with all indicator codes for a particular
country in the format of (country, indicator, year, value).
Notes:
Takes approximately 10 seconds to download and
tidy one country in a Jupyter notebook.
"""
country_zip = ("http://api.worldbank.org/v2/en/country/" + iso3166alpha3 +
"?downloadformat=csv")
r = requests.get(country_zip)
filebytes = io.BytesIO(r.content)
myzipfile = zipfile.ZipFile(filebytes)
# We need to select the data file which starts with "API",
# but does not have an otherwise regular filename structure.
file_to_open = None
for file in myzipfile.namelist():
if file.startswith("API"):
file_to_open = file
break
assert file_to_open is not None, \
"Failed to find data for " + iso3166alpha3
df = None
# Captures any text contained in double quotations.
line_match = re.compile(r"\"([^\"]*)\"")
for line in myzipfile.open(file_to_open).readlines():
# Cells are contained in quotations and comma separated.
cols = line_match.findall(line.decode("utf-8"))
# CSVs include header informational lines which should be ignored.
if len(cols) > 2:
# Use first row as the header.
if df is None:
df = pd.DataFrame(columns=cols)
else:
df = df.append(pd.DataFrame([cols], columns=df.columns),
ignore_index=True)
df = df.rename(columns=WORLDBANK_COL_REMAP)
# Turn each year into its own row.
df = df.set_index(
['CountryName', 'CountryCode', 'IndicatorName', 'IndicatorCode'])
df = df.stack()
df.index = df.index.rename('year', level=4)
df.name = "Value"
df = df.reset_index()
# Convert to numeric and drop empty values.
df['Value'] = pd.to_numeric(df['Value'])
df = df.dropna()
return df
def build_stat_vars_from_indicator_list(row):
""" Generates World Bank StatVar for a row in the indicators dataframe. """
def row_to_constraints(row):
""" Helper to generate list of constraints. """
constraints_text = ""
next_constraint = 1
while (f"p{next_constraint}" in row and
not pd.isna(row[f"p{next_constraint}"])):
variable = row[f'p{next_constraint}']
constraint = row[f'v{next_constraint}']
constraints_text += f"{variable}: dcs:{constraint}\n"
next_constraint += 1
return constraints_text
# yapf: disable
# Input all required statistical variable fields.
new_stat_var = (TEMPLATE_STAT_VAR
.replace("{INDICATOR}", row['IndicatorCode'].replace(".", "_"))
.replace("{NAME}", row['IndicatorName'])
.replace("{DESCRIPTION}", row['SourceNote'])
.replace("{measuredProperty}", row['measuredProp'])
.replace("{CONSTRAINTS}", row_to_constraints(row))
)
# yapf: enable
# Include or remove optional fields.
for optional_col in ([
'populationType', 'statType', 'measurementDenominator'
]):
if not pd.isna(row[optional_col]):
new_stat_var = new_stat_var.replace(f"{{{optional_col}}}",
row[optional_col])
else:
new_stat_var = new_stat_var.replace(
f"{optional_col}: dcs:{{{optional_col}}}\n", "")
return new_stat_var
def group_stat_vars_by_observation_properties(indicator_codes):
""" Groups stat vars by their observation schemas.
Groups Stat Vars by their inclusion of StatVar Observation
properties like measurementMethod or Unit.
The current template MCF schema does not support optional values in the
CSV so we must place these stat vars into
different template MCFs and CSVs.
Args:
indicator_codes: List of World Bank indicator codes with
their Data Commons mappings, as a pandas dataframe.
Returns:
Array of tuples for each statistical variable grouping.
1) template MCF, as a string.
2) columns to include in exported csv, as a list of strings.
3) indicator codes in this grouping, as a list of strings.
"""
# All the statistical observation properties that we included.
properties_of_stat_var_observation = ([
'measurementMethod', 'scalingFactor', 'sourceScalingFactor', 'unit'
])
# List of tuples to return.
tmcfs_for_stat_vars = []
# Dataframe that tracks which values are null.
null_status = indicator_codes.notna()
# Iterates over all permutations of stat var properties being included.
for permutation in list(
itertools.product([False, True],
repeat=len(properties_of_stat_var_observation))):
codes_that_match = null_status.copy()
base_template_mcf = TEMPLATE_TMCF
cols_to_include_in_csv = ['IndicatorCode']
# Loop over each obs column and whether to include it.
for include_col, column in (zip(permutation,
properties_of_stat_var_observation)):
# Filter the dataframe by this observation.
codes_that_match = codes_that_match.query(
f"{column} == {include_col}")
# Include the column in TMCF and column list.
if include_col:
base_template_mcf += f"{column}: C:WorldBank->{column}\n"
cols_to_include_in_csv.append(f"{column}")
tmcfs_for_stat_vars.append(
(base_template_mcf, cols_to_include_in_csv,
list(
indicator_codes.loc[codes_that_match.index]['IndicatorCode'])))
return tmcfs_for_stat_vars
def download_indicator_data(worldbank_countries, indicator_codes):
""" Downloads World Bank country data for all countries and
indicators provided.
Retains only the unique indicator codes provided.
Args:
worldbank_countries: Dataframe with ISO 3166 alpha 3 code for each
country.
indicator_codes: Dataframe with INDICATOR_CODES to include.
Returns:
worldbank_dataframe: A tidied pandas dataframe where each row has
the format (indicator code, ISO 3166 alpha 3, year, value)
for all countries and all indicators provided.
"""
worldbank_dataframe = pd.DataFrame()
indicators_to_keep = list(indicator_codes['IndicatorCode'].unique())
for index, country_code in enumerate(worldbank_countries['ISO3166Alpha3']):
print(f"Downloading {country_code}")
country_df = read_worldbank(country_code)
# Remove unnecessary indicators.
country_df = country_df[country_df['IndicatorCode'].isin(
indicators_to_keep)]
# Map country codes to ISO.
country_df['ISO3166Alpha3'] = country_code
# Add new rows to the main dataframe.
worldbank_dataframe = worldbank_dataframe.append(country_df)
# Map indicator codes to unique Statistical Variable.
worldbank_dataframe['StatisticalVariable'] = (
worldbank_dataframe['IndicatorCode'].apply(
lambda code: f"WorldBank/{code.replace('.', '_')}"))
return worldbank_dataframe.rename({'year': 'Year'}, axis=1)
def output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
indicator_codes):
""" Outputs TMCFs and CSVs for each grouping of stat vars.
Args:
worldbank_dataframe: Dataframe containing all indicators for all
countries.
tmcfs_for_stat_vars: Array of tuples of template MCF,
columns on stat var observations,
indicator codes for that template.
indicator_codes: Dataframe with INDICATOR_CODES to include.
"""
# Only include a subset of columns in the final csv
output_csv = worldbank_dataframe[[
'StatisticalVariable', 'IndicatorCode', 'ISO3166Alpha3', 'Year', 'Value'
]]
# Output tmcf and csv for each unique World Bank grouping.
for index, enum in enumerate(tmcfs_for_stat_vars):
tmcf, stat_var_obs_cols, stat_vars_in_group = enum
if len(stat_vars_in_group) != 0:
with open(f"output/WorldBank_{index}.tmcf", 'w',
newline='') as f_out:
f_out.write(tmcf)
# Output only the indicator codes in that grouping.
matching_csv = output_csv[output_csv['IndicatorCode'].isin(
stat_vars_in_group)]
# Include the Stat Observation columns in the output CSV.
if len(stat_var_obs_cols) > 1:
matching_csv = pd.merge(matching_csv,
indicator_codes[stat_var_obs_cols],
on="IndicatorCode")
# Format to decimals.
matching_csv = matching_csv.round(10)
matching_csv.drop("IndicatorCode",
axis=1).to_csv(f"output/WorldBank_{index}.csv",
float_format='%.10f',
index=False)
def source_scaling_remap(row, scaling_factor_lookup, existing_stat_var_lookup):
""" Scales values by sourceScalingFactor and inputs exisiting stat vars.
First, this function converts all values to per capita. Some measures
in the World Bank dataset are per thousand or per hundred thousand, but
we need to scale these to the common denomination format. Secondly,
some statistical variables such as Count_Person_InLaborForce are not
World Bank specific and need to be replaced. Both of these are imputted
from the following two lists in args.
Args:
scaling_factor_lookup: A dictionary of a mapping between World Bank
indicator code to the respective numeric scaling factor.
existing_stat_var_lookup: A dictionary of a mapping between all
indicator to be replaced with the exisiting stat var to replace it.
"""
indicator_code = row['IndicatorCode']
if indicator_code in scaling_factor_lookup:
row['Value'] = (row['Value'] /
int(scaling_factor_lookup[indicator_code]))
if indicator_code in existing_stat_var_lookup:
row['StatisticalVariable'] = ("dcid:" +
existing_stat_var_lookup[indicator_code])
return row
def main(_):
# Load statistical variable configuration file.
indicator_codes = pd.read_csv("WorldBankIndicators.csv")
# Add source description to note.
def add_source_to_description(row):
if not pd.isna(row['Source']):
return row['SourceNote'] + " " + str(row['Source'])
else:
return row['SourceNote']
indicator_codes['SourceNote'] = indicator_codes.apply(
add_source_to_description, axis=1)
# Generate stat vars
with open("output/WorldBank_StatisticalVariables.mcf", "w+") as f_out:
# Generate StatVars for fields that don't exist. Some fields such as
# Count_Person_Unemployed are already statistical variables so we do
# not need to recreate them.
for _, row in indicator_codes[
indicator_codes['ExistingStatVar'].isna()].iterrows():
f_out.write(build_stat_vars_from_indicator_list(row))
# Create template MCFs for each grouping of stat vars.
tmcfs_for_stat_vars = (
group_stat_vars_by_observation_properties(indicator_codes))
# Download data for all countries.
worldbank_countries = pd.read_csv("WorldBankCountries.csv")
worldbank_dataframe = download_indicator_data(worldbank_countries,
indicator_codes)
# Remap columns to match expected format.
worldbank_dataframe['Value'] = pd.to_numeric(worldbank_dataframe['Value'])
worldbank_dataframe['ISO3166Alpha3'] = (
worldbank_dataframe['ISO3166Alpha3'].apply(
lambda code: "dcs:country/" + code))
worldbank_dataframe['StatisticalVariable'] = \
worldbank_dataframe['StatisticalVariable'].apply(
lambda code: "dcs:" + code)
# Scale values by scaling factor and replace existing StatVars.
scaling_factor_lookup = (indicator_codes.set_index("IndicatorCode")
['sourceScalingFactor'].dropna().to_dict())
existing_stat_var_lookup = (indicator_codes.set_index("IndicatorCode")
['ExistingStatVar'].dropna().to_dict())
worldbank_dataframe = worldbank_dataframe.apply(
lambda row: source_scaling_remap(row, scaling_factor_lookup,
existing_stat_var_lookup),
axis=1)
# Convert integer columns.
int_cols = (list(indicator_codes[indicator_codes['ConvertToInt'] == True]
['IndicatorCode'].unique()))
worldbank_subset = worldbank_dataframe[
worldbank_dataframe['IndicatorCode'].isin(int_cols)].index
worldbank_dataframe.loc[worldbank_subset, "Value"] = (pd.to_numeric(
worldbank_dataframe.loc[worldbank_subset, "Value"], downcast="integer"))
# Output final CSVs and variables.
output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
indicator_codes)
if __name__ == '__main__':
app.run(main)
|
request_file
|
Download a file from a remote URL and build the uploaded file object used in request.FILES.
@param url: file URL, e.g. http://abc.im/12345.jpg
@return: SimpleUploadedFile object, as contained in request.FILES (a dictionary-like object)
|
#coding=utf-8
#
# Created on Apr 23, 2014, by Junn
#
#
import json
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from rest_framework.response import Response as RfResponse
from core import codes
import urllib
import httplib
import requests
from django.core.files.uploadedfile import SimpleUploadedFile
# MASKED: request_file function (lines 21-31)
def send_request(host, send_url, method='GET', port=80, params={}, timeout=30,
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}):
'''发起http请求. 执行结果返回响应字符串
@param: The sample parameters format like following:
params = {'token': 'dF0zeqAPWs'}
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
host = 'fir.im'
port = 80
method = 'GET'
send_url = '/api/v2/app/version/541a7131f?token=dF0zeqBMXAP'
'''
encoded_params = urllib.urlencode(params)
conn = httplib.HTTPConnection(host, port=port, timeout=timeout)
conn.request(method, send_url, encoded_params, headers)
response = conn.getresponse()
response_str = response.read()
conn.close()
return response_str
class JResponse(HttpResponse):
'''for simple dict response, like success and failed, etc'''
def __init__(self, result, status=200, *args, **kwargs):
if not isinstance(result, list):
if 'errors' in result.keys():
dt = result.pop('errors', {}) or {}
result['msg'] = ''.join([e[0] for e in dt.values()])
super(JResponse, self).__init__(
json.dumps(result), status=status, mimetype='application/json', *args, **kwargs
)
def standard_response(template, req, context):
'''返回http Web response'''
return render_to_response(template, RequestContext(req, context))
class Response(RfResponse):
'''for object json response'''
def __init__(self, data, *args, **kwargs):
if isinstance(data, dict) and 'code' in data.keys(): #data为dict, 且已有code则无需再添加code返回
super(Response, self).__init__(data, *args, **kwargs)
else:
super(Response, self).__init__(codes.append('ok', {'data': data}), *args, **kwargs)
## 注: 此必须声明为函数, 不可声明为常量. 常量值将只在模块import时被赋值
def ok(data={}):
'''data为字典类型数据'''
return JResponse(codes.append('ok', data)) if data else resp('ok')
def failed(msg=''):
return resp('failed', msg)
def object_not_found():
return resp('object_not_found')
def http404():
return resp('not_found')
def resp(crr, msg=''):
'''返回常量错误码. msg可格式化具有占位符的字符串
params:
@crr 错误码标识
'''
return JResponse(codes.fmat(crr, msg))
|
def request_file(url):
    '''Download a file from a remote url and return it wrapped as an uploaded-file
    object, suitable for request.FILES.
    @param url: file url, e.g. http://abc.im/12345.jpg
    @return: SimpleUploadedFile object, as contained in request.FILES (a dictionary-like object)
    '''
    if not url:
        return
    response = requests.get(url)
    return SimpleUploadedFile('file', response.content)
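# Hedged usage sketch (not part of the original module): attach the downloaded
# file to a hypothetical model; `Photo` and its `image` field are illustrative
# names only, not defined anywhere in this codebase.
#
#   uploaded = request_file('http://abc.im/12345.jpg')
#   if uploaded is not None:
#       Photo(image=uploaded).save()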
| 21 | 31 |
#coding=utf-8
#
# Created on Apr 23, 2014, by Junn
#
#
import json
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from rest_framework.response import Response as RfResponse
from core import codes
import urllib
import httplib
import requests
from django.core.files.uploadedfile import SimpleUploadedFile
def request_file(url):
'''从远端下载文件, 并构建request.FILES中的uploaded file对象返回.
@param url: 文件url路径, 如http://abc.im/12345.jpg
@return: SimpleUploadedFile object, it is containned by the request.FILES(dictionary-like object)
'''
if not url:
return
response = requests.get(url)
return SimpleUploadedFile('file', response.content)
def send_request(host, send_url, method='GET', port=80, params={}, timeout=30,
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}):
'''发起http请求. 执行结果返回响应字符串
@param: The sample parameters format like following:
params = {'token': 'dF0zeqAPWs'}
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
host = 'fir.im'
port = 80
method = 'GET'
send_url = '/api/v2/app/version/541a7131f?token=dF0zeqBMXAP'
'''
encoded_params = urllib.urlencode(params)
conn = httplib.HTTPConnection(host, port=port, timeout=timeout)
conn.request(method, send_url, encoded_params, headers)
response = conn.getresponse()
response_str = response.read()
conn.close()
return response_str
class JResponse(HttpResponse):
'''for simple dict response, like success and failed, etc'''
def __init__(self, result, status=200, *args, **kwargs):
if not isinstance(result, list):
if 'errors' in result.keys():
dt = result.pop('errors', {}) or {}
result['msg'] = ''.join([e[0] for e in dt.values()])
super(JResponse, self).__init__(
json.dumps(result), status=status, mimetype='application/json', *args, **kwargs
)
def standard_response(template, req, context):
'''返回http Web response'''
return render_to_response(template, RequestContext(req, context))
class Response(RfResponse):
'''for object json response'''
def __init__(self, data, *args, **kwargs):
if isinstance(data, dict) and 'code' in data.keys(): #data为dict, 且已有code则无需再添加code返回
super(Response, self).__init__(data, *args, **kwargs)
else:
super(Response, self).__init__(codes.append('ok', {'data': data}), *args, **kwargs)
## 注: 此必须声明为函数, 不可声明为常量. 常量值将只在模块import时被赋值
def ok(data={}):
'''data为字典类型数据'''
return JResponse(codes.append('ok', data)) if data else resp('ok')
def failed(msg=''):
return resp('failed', msg)
def object_not_found():
return resp('object_not_found')
def http404():
return resp('not_found')
def resp(crr, msg=''):
'''返回常量错误码. msg可格式化具有占位符的字符串
params:
@crr 错误码标识
'''
return JResponse(codes.fmat(crr, msg))
|
send_request
|
Send an HTTP request and return the response body as a string.
@param: sample parameters look like the following:
params = {'token': 'dF0zeqAPWs'}
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
host = 'fir.im'
port = 80
method = 'GET'
send_url = '/api/v2/app/version/541a7131f?token=dF0zeqBMXAP'
|
#coding=utf-8
#
# Created on Apr 23, 2014, by Junn
#
#
import json
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from rest_framework.response import Response as RfResponse
from core import codes
import urllib
import httplib
import requests
from django.core.files.uploadedfile import SimpleUploadedFile
def request_file(url):
'''从远端下载文件, 并构建request.FILES中的uploaded file对象返回.
@param url: 文件url路径, 如http://abc.im/12345.jpg
@return: SimpleUploadedFile object, it is containned by the request.FILES(dictionary-like object)
'''
if not url:
return
response = requests.get(url)
return SimpleUploadedFile('file', response.content)
# MASKED: send_request function (lines 33-54)
class JResponse(HttpResponse):
'''for simple dict response, like success and failed, etc'''
def __init__(self, result, status=200, *args, **kwargs):
if not isinstance(result, list):
if 'errors' in result.keys():
dt = result.pop('errors', {}) or {}
result['msg'] = ''.join([e[0] for e in dt.values()])
super(JResponse, self).__init__(
json.dumps(result), status=status, mimetype='application/json', *args, **kwargs
)
def standard_response(template, req, context):
'''返回http Web response'''
return render_to_response(template, RequestContext(req, context))
class Response(RfResponse):
'''for object json response'''
def __init__(self, data, *args, **kwargs):
if isinstance(data, dict) and 'code' in data.keys(): #data为dict, 且已有code则无需再添加code返回
super(Response, self).__init__(data, *args, **kwargs)
else:
super(Response, self).__init__(codes.append('ok', {'data': data}), *args, **kwargs)
## 注: 此必须声明为函数, 不可声明为常量. 常量值将只在模块import时被赋值
def ok(data={}):
'''data为字典类型数据'''
return JResponse(codes.append('ok', data)) if data else resp('ok')
def failed(msg=''):
return resp('failed', msg)
def object_not_found():
return resp('object_not_found')
def http404():
return resp('not_found')
def resp(crr, msg=''):
'''返回常量错误码. msg可格式化具有占位符的字符串
params:
@crr 错误码标识
'''
return JResponse(codes.fmat(crr, msg))
|
def send_request(host, send_url, method='GET', port=80, params={}, timeout=30,
                 headers={"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}):
    '''Send an HTTP request and return the response body as a string.
    @param: sample parameters look like the following:
        params = {'token': 'dF0zeqAPWs'}
        headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
        host = 'fir.im'
        port = 80
        method = 'GET'
        send_url = '/api/v2/app/version/541a7131f?token=dF0zeqBMXAP'
    '''
    encoded_params = urllib.urlencode(params)
    conn = httplib.HTTPConnection(host, port=port, timeout=timeout)
    conn.request(method, send_url, encoded_params, headers)
    response = conn.getresponse()
    response_str = response.read()
    conn.close()
    return response_str
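# Hedged usage sketch (not part of the original module), reusing the sample
# values given in the docstring above:
#
#   body = send_request(host='fir.im',
#                       send_url='/api/v2/app/version/541a7131f?token=dF0zeqBMXAP',
#                       method='GET', port=80, params={'token': 'dF0zeqAPWs'})
#   print(body)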
| 33 | 54 |
#coding=utf-8
#
# Created on Apr 23, 2014, by Junn
#
#
import json
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from rest_framework.response import Response as RfResponse
from core import codes
import urllib
import httplib
import requests
from django.core.files.uploadedfile import SimpleUploadedFile
def request_file(url):
'''从远端下载文件, 并构建request.FILES中的uploaded file对象返回.
@param url: 文件url路径, 如http://abc.im/12345.jpg
@return: SimpleUploadedFile object, it is containned by the request.FILES(dictionary-like object)
'''
if not url:
return
response = requests.get(url)
return SimpleUploadedFile('file', response.content)
def send_request(host, send_url, method='GET', port=80, params={}, timeout=30,
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}):
'''发起http请求. 执行结果返回响应字符串
@param: The sample parameters format like following:
params = {'token': 'dF0zeqAPWs'}
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
host = 'fir.im'
port = 80
method = 'GET'
send_url = '/api/v2/app/version/541a7131f?token=dF0zeqBMXAP'
'''
encoded_params = urllib.urlencode(params)
conn = httplib.HTTPConnection(host, port=port, timeout=timeout)
conn.request(method, send_url, encoded_params, headers)
response = conn.getresponse()
response_str = response.read()
conn.close()
return response_str
class JResponse(HttpResponse):
'''for simple dict response, like success and failed, etc'''
def __init__(self, result, status=200, *args, **kwargs):
if not isinstance(result, list):
if 'errors' in result.keys():
dt = result.pop('errors', {}) or {}
result['msg'] = ''.join([e[0] for e in dt.values()])
super(JResponse, self).__init__(
json.dumps(result), status=status, mimetype='application/json', *args, **kwargs
)
def standard_response(template, req, context):
'''返回http Web response'''
return render_to_response(template, RequestContext(req, context))
class Response(RfResponse):
'''for object json response'''
def __init__(self, data, *args, **kwargs):
if isinstance(data, dict) and 'code' in data.keys(): #data为dict, 且已有code则无需再添加code返回
super(Response, self).__init__(data, *args, **kwargs)
else:
super(Response, self).__init__(codes.append('ok', {'data': data}), *args, **kwargs)
## 注: 此必须声明为函数, 不可声明为常量. 常量值将只在模块import时被赋值
def ok(data={}):
'''data为字典类型数据'''
return JResponse(codes.append('ok', data)) if data else resp('ok')
def failed(msg=''):
return resp('failed', msg)
def object_not_found():
return resp('object_not_found')
def http404():
return resp('not_found')
def resp(crr, msg=''):
'''返回常量错误码. msg可格式化具有占位符的字符串
params:
@crr 错误码标识
'''
return JResponse(codes.fmat(crr, msg))
|
send_async_http
|
Send an asynchronous request to a specific url, with retries on failure.
After each failure the next attempt is delayed for a while; the delay is
determined by interval and wait_factor.
:param session: asynchronous session used for the request
:param method: HTTP method
:param url: request url
:param retries: number of retries on failure
:param interval: delay before the next asynchronous attempt after a failure
:param wait_factor: after every failure the delay is multiplied by this factor to lengthen the wait; usually 1 < wf < 2, i.e. the delay is at most 2^retries seconds
:param timeout: connection timeout
:param success_callback: callback invoked on success
:param fail_callback: callback invoked on failure
:param kwargs: other keyword arguments
:return: a dict with the result
|
#coding:utf-8
"""
@author : linkin
@email : [email protected]
@date : 2018-11-07
"""
import asyncio
import datetime
# MASKED: send_async_http function (lines 10-64)
|
async def send_async_http(session, method, url, *,
                          retries=1,
                          interval=1,
                          wait_factor=2,
                          timeout=30,
                          success_callback=None,
                          fail_callback=None,
                          **kwargs) -> dict:
    """
    Send an asynchronous request to a specific url, with retries on failure.
    After each failure the next attempt is delayed for a while; the delay is
    determined by interval and wait_factor.
    :param session: asynchronous session used for the request
    :param method: HTTP method
    :param url: request url
    :param retries: number of retries on failure
    :param interval: delay before the next asynchronous attempt after a failure
    :param wait_factor: after every failure the delay is multiplied by this factor
        to lengthen the wait; usually 1 < wf < 2, i.e. the delay is at most
        2^retries seconds
    :param timeout: connection timeout
    :param success_callback: callback invoked on success
    :param fail_callback: callback invoked on failure
    :param kwargs: other keyword arguments
    :return: a dict with the result
    """
    exception = None
    ret = {'cost': None, 'code': 0, 'exception': exception, 'tries': -1}
    wait_interval = interval
    if method.lower() not in ['get', 'head', 'post']:
        return ret
    if retries == -1:    # -1 means retry indefinitely after failures
        attempt = -1
    elif retries == 0:   # 0 means do not retry after a failure
        attempt = 1
    else:
        attempt = retries + 1
    while attempt != 0:
        try:
            start = datetime.datetime.now()
            async with getattr(session, method)(url, timeout=timeout, **kwargs) as response:
                end = datetime.datetime.now()
                t = (end - start).total_seconds()
                code = response.status
                ret = {'cost': t, 'code': code, 'tries': retries - attempt + 1}
                if success_callback:
                    success_callback(ret)
                return ret
        except Exception as e:
            ret['exception'] = e
            ret['tries'] += 1
            await asyncio.sleep(wait_interval)
            wait_interval = wait_interval * wait_factor
        attempt -= 1
    if fail_callback:
        fail_callback(ret)
    return ret
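# Hedged usage sketch (not part of the original module). It assumes an
# aiohttp.ClientSession, whose get/head/post methods return async context
# managers exposing a `.status` attribute, which is what send_async_http expects.
#
#   import aiohttp
#
#   async def fetch():
#       async with aiohttp.ClientSession() as session:
#           return await send_async_http(session, 'get', 'http://example.com',
#                                        retries=2, interval=1, wait_factor=2)
#
#   result = asyncio.run(fetch())  # e.g. {'cost': 0.12, 'code': 200, 'tries': 0}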
| 10 | 64 |
#coding:utf-8
"""
@author : linkin
@email : [email protected]
@date : 2018-11-07
"""
import asyncio
import datetime
async def send_async_http(session,method,url,*,
retries=1,
interval=1,
wait_factor=2,
timeout=30,
success_callback=None,
fail_callback=None,
**kwargs) -> dict:
"""
发送一个异步请求至某个特定url,实现失败重试
每一次失败后会延时一段时间再去重试,延时时间由
interval和wait_factor决定
:param session:请求的异步session
:param method:请求方法
:param url:请求url
:param retries:失败重试次数
:param interval:失败后的再次异步请求的延时时长
:param wait_factor:每一次失败后延时乘以这个因子,延长重试等待时间,一般1<wf<2,即延时最多2^retries秒
:param timeout:连接超时时长
:param success_callback:成功回调函数
:param fail_callback:失败回调函数
:param kwargs:其他键值参数
:return:返回字典结果
"""
exception = None
ret = {'cost':None,'code':0,'exception':exception,'tries':-1}
wait_interval = interval
if method.lower() not in ['get', 'head', 'post']:
return ret
if retries == -1: # -1 表示无限次失败重试
attempt = -1
elif retries == 0: # 0 表示不进行失败重试
attempt = 1
else:
attempt = retries + 1
while attempt != 0:
try:
start = datetime.datetime.now()
async with getattr(session,method)(url,timeout=timeout,**kwargs) as response:
end = datetime.datetime.now()
t = (end - start).total_seconds()
code = response.status
ret = {'cost': t, 'code': code, 'tries': retries - attempt+1}
if success_callback:
success_callback(ret)
return ret
except Exception as e:
ret['exception'] = e
ret['tries'] += 1
await asyncio.sleep(wait_interval)
wait_interval = wait_interval * wait_factor
attempt-=1
if fail_callback:
fail_callback(ret)
return ret
|
_calc_uia
|
Calculate the UI(a) by providing the labeling history and the majority vote results.
Parameters
----------
oracle_history: dict
    The labeling history of an oracle. The key is the index of the instance,
    the value is the label given by the oracle.
majority_vote_result: dict
    The majority vote results of the instances. The key is the index of the
    instance, the value is the majority-voted label.
alpha: float, optional (default=0.05)
Used for calculating the critical value for the Student’s t-distribution with n−1
degrees of freedom at the alpha/2 confidence level.
Returns
-------
uia: float
The UI(a) value.
|
"""
Pre-defined query strategy for noisy oracles.
In reality, the labels given by humans are not always correct. On the one hand,
some unavoidable noise comes from the instrumentation of the experimental
setting. On the other hand, people can become distracted or fatigued over time,
introducing variability in the quality of their annotations.
ALiPy implements several strategies for noisy-oracle settings.
Some of them mainly evaluate the quality or expertise of each oracle,
and the rest try to obtain the accurate label for each instance
whose labels are provided by several noisy oracles.
There are 2 categories of methods.
1. Query from a single selected oracle.
    1.1 Always query from the best oracle.
    1.2 Query from the most appropriate oracle
        according to the selected instance and label.
2. Query from multiple noisy oracles. Labels are obtained from multiple noisy
   oracles, and the algorithm tries to obtain the accurate label for each instance.
Implemented methods:
1: CEAL (IJCAI'17)
2: IEthresh (KDD'09 Donmez)
Baselines:
Majority vote
Query from all oracles and majority vote
Random select an oracle
"""
# Authors: Ying-Peng Tang
# License: BSD 3 clause
from __future__ import division
import collections
from abc import ABCMeta, abstractmethod
import copy
import numpy as np
import scipy.stats
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import NearestNeighbors
from .base import BaseNoisyOracleQuery
from .query_labels import QueryInstanceUncertainty
from .query_labels import _get_proba_pred
from ..oracle import Oracles, Oracle
def majority_vote(labels, weight=None):
"""Perform majority vote to determine the true label from
multiple noisy oracles.
Parameters
----------
labels: list
A list with length=k, which contains the labels provided by
k noisy oracles.
weight: list, optional (default=None)
The weights of each oracle. It should have the same length with
labels.
Returns
-------
vote_count: int
The number of votes.
vote_result: object
The label of the selected_instance, produced by majority voting
of the selected oracles.
"""
oracle_weight = np.ones(len(labels)) if weight is None else weight
assert len(labels) == len(oracle_weight)
vote_result = collections.Counter(labels)
most_votes = vote_result.most_common(n=1)
return most_votes[0][1], most_votes[0][0]
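# Illustrative note (not in the original source): with the default weights the
# vote is a plain count over `labels`, e.g. majority_vote(['cat', 'dog', 'cat'])
# returns (2, 'cat') -- the vote count first, then the winning label. The
# `weight` argument is length-checked but not otherwise used by this
# implementation.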
def get_query_results(selected_instance, oracles, names=None):
"""Get the query results from oracles of the selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
names: list, optional (default=None)
A list of str which contains the names of oracles to query from.
If not provided, it will query from all oracles.
Each name should in oracles.names().
Returns
-------
query_labels: list
The queried labels.
query_costs: list
The total cost of query.
"""
costs = []
if isinstance(oracles, list):
oracle_type = 'list'
for oracle in oracles:
assert isinstance(oracle, Oracle)
elif isinstance(oracles, Oracles):
oracle_type = 'oracles'
else:
raise TypeError("The type of parameter oracles must be a list or alipy.oracle.Oracles object.")
labeling_results = []
if oracle_type == 'list':
for i in oracles.names() if oracle_type == 'oracles' else range(len(oracles)):
lab, co = oracles[i].query_by_index(selected_instance)
labeling_results.append(lab[0])
costs.append(np.sum(co))
else:
results = oracles.query_from_s(selected_instance, oracles_name=names)
labeling_results = [res[0][0] for res in results]
costs = [np.sum(res[1]) for res in results]
return labeling_results, costs
def get_majority_vote(selected_instance, oracles, names=None):
"""Get the majority vote results of the selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
names: list, optional (default=None)
A list of str which contains the names of oracles to query from.
If not provided, it will query from all oracles.
Each name should in oracles.names().
Returns
-------
vote_count: int
The number of votes.
vote_result: object
The label of the selected_instance, produced by majority voting
of the selected oracles.
query_costs: int
The total cost of query.
"""
labeling_results, cost = get_query_results(selected_instance, oracles, names)
majority_vote_result = majority_vote(labeling_results)
return majority_vote_result[0], majority_vote_result[1], np.sum(cost)
class QueryNoisyOraclesCEAL(BaseNoisyOracleQuery):
"""Cost-Effective Active Learning from Diverse Labelers (CEAL) method assumes
that different oracles have different expertise. Even the very noisy oracle
may perform well on some kind of examples. The cost of a labeler is proportional
to its overall labeling quality and it is thus necessary to query from the right oracle
according to the selected instance.
This method will select an instance-labeler pair (x, a), and queries the label of x
from a, where the selection of both the instance and labeler is based on a
evaluation function Q(x, a).
The selection of instance is depend on its uncertainty. The selection of oracle is
depend on the oracle's performance on the nearest neighbors of selected instance.
The cost of each oracle is proportional to its overall labeling quality.
Parameters
----------
X: 2D array, optional (default=None)
Feature matrix of the whole dataset. It is a reference which will not use additional memory.
y: array-like, optional (default=None)
Label matrix of the whole dataset. It is a reference which will not use additional memory.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
initial_labeled_indexes: {list, np.ndarray, IndexCollection}
The indexes of initially labeled samples. Used for initializing the scores of each oracle.
References
----------
[1] Sheng-Jun Huang, Jia-Lve Chen, Xin Mu, Zhi-Hua Zhou. 2017.
Cost-Effective Active Learning from Diverse Labelers. In The
Proceedings of the 26th International Joint Conference
on Artificial Intelligence (IJCAI-17), 1879-1885.
"""
def __init__(self, X, y, oracles, initial_labeled_indexes):
super(QueryNoisyOraclesCEAL, self).__init__(X, y, oracles=oracles)
# ytype = type_of_target(self.y)
# if 'multilabel' in ytype:
# warnings.warn("This query strategy does not support multi-label.",
# category=FunctionWarning)
assert (isinstance(initial_labeled_indexes, collections.Iterable))
self._ini_ind = np.asarray(initial_labeled_indexes)
# construct a nearest neighbor object implemented by scikit-learn
self._nntree = NearestNeighbors(metric='euclidean')
self._nntree.fit(self.X[self._ini_ind])
def select(self, label_index, unlabel_index, eval_cost=False, model=None, **kwargs):
"""Query from oracles. Return the index of selected instance and oracle.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
eval_cost: bool, optional (default=False)
To evaluate the cost of oracles or use the cost provided by oracles.
model: object, optional (default=None)
Current classification model, should have the 'predict_proba' method for probabilistic output.
If not provided, LogisticRegression with default parameters implemented by sklearn will be used.
n_neighbors: int, optional (default=10)
How many neighbors of the selected instance will be used
to evaluate the oracles.
Returns
-------
selected_instance: int
The index of selected instance.
selected_oracle: int or str
The index of selected oracle.
If a list is given, the index of oracle will be returned.
If a Oracles object is given, the oracle name will be returned.
"""
if model is None:
model = LogisticRegression(solver='liblinear')
model.fit(self.X[label_index], self.y[label_index])
pred_unlab, _ = _get_proba_pred(self.X[unlabel_index], model)
n_neighbors = min(kwargs.pop('n_neighbors', 10), len(self._ini_ind) - 1)
return self.select_by_prediction_mat(label_index, unlabel_index, pred_unlab,
n_neighbors=n_neighbors, eval_cost=eval_cost)
def select_by_prediction_mat(self, label_index, unlabel_index, predict, **kwargs):
"""Query from oracles. Return the index of selected instance and oracle.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
predict: : 2d array, shape [n_samples, n_classes]
The probabilistic prediction matrix for the unlabeled set.
n_neighbors: int, optional (default=10)
How many neighbors of the selected instance will be used
to evaluate the oracles.
eval_cost: bool, optional (default=False)
To evaluate the cost of oracles or use the cost provided by oracles.
Returns
-------
selected_instance: int
The index of selected instance.
selected_oracle: int or str
The index of selected oracle.
If a list is given, the index of oracle will be returned.
If a Oracles object is given, the oracle name will be returned.
"""
n_neighbors = min(kwargs.pop('n_neighbors', 10), len(self._ini_ind)-1)
        eval_cost = kwargs.pop('eval_cost', False)
Q_table, oracle_ind_name_dict = self._calc_Q_table(label_index, unlabel_index, self._oracles, predict,
n_neighbors=n_neighbors, eval_cost=eval_cost)
# get the instance-oracle pair
selected_pair = np.unravel_index(np.argmax(Q_table, axis=None), Q_table.shape)
sel_ora = oracle_ind_name_dict[selected_pair[0]]
if not isinstance(sel_ora, list):
sel_ora = [sel_ora]
return [unlabel_index[selected_pair[1]]], sel_ora
def _calc_Q_table(self, label_index, unlabel_index, oracles, pred_unlab, n_neighbors=10, eval_cost=False):
"""Query from oracles. Return the Q table and the oracle name/index of each row of Q_table.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
predict: : 2d array, shape [n_samples, n_classes]
The probabilistic prediction matrix for the unlabeled set.
n_neighbors: int, optional (default=10)
How many neighbors of the selected instance will be used
to evaluate the oracles.
eval_cost: bool, optional (default=False)
To evaluate the cost of oracles or use the cost provided by oracles.
Returns
-------
Q_table: 2D array
The Q table.
oracle_ind_name_dict: dict
The oracle name/index of each row of Q_table.
"""
# Check parameter and initialize variables
if self.X is None or self.y is None:
raise Exception('Data matrix is not provided, use select_by_prediction_mat() instead.')
assert (isinstance(unlabel_index, collections.Iterable))
assert (isinstance(label_index, collections.Iterable))
unlabel_index = np.asarray(unlabel_index)
label_index = np.asarray(label_index)
num_of_neighbors = n_neighbors
if len(unlabel_index) <= 1:
return unlabel_index
Q_table = np.zeros((len(oracles), len(unlabel_index))) # row:oracle, col:ins
spv = np.shape(pred_unlab)
# calc least_confident
rx = np.partition(pred_unlab, spv[1] - 1, axis=1)
rx = 1 - rx[:, spv[1] - 1]
for unlab_ind, unlab_ins_ind in enumerate(unlabel_index):
# evaluate oracles for each instance
nn_dist, nn_of_selected_ins = self._nntree.kneighbors(X=self.X[unlab_ins_ind].reshape(1, -1),
n_neighbors=num_of_neighbors,
return_distance=True)
nn_dist = nn_dist[0]
nn_of_selected_ins = nn_of_selected_ins[0]
nn_of_selected_ins = self._ini_ind[nn_of_selected_ins] # map to the original population
oracles_score = []
for ora_ind, ora_name in enumerate(self._oracles_iterset):
# calc q_i(x), expertise of this instance
oracle = oracles[ora_name]
labels, cost = oracle.query_by_index(nn_of_selected_ins)
oracles_score.append(sum([nn_dist[i] * (labels[i] == self.y[nn_of_selected_ins[i]]) for i in
range(num_of_neighbors)]) / num_of_neighbors)
# calc c_i, cost of each labeler
labels, cost = oracle.query_by_index(label_index)
if eval_cost:
oracles_cost = sum([labels[i] == self.y[label_index[i]] for i in range(len(label_index))]) / len(label_index)
else:
oracles_cost = cost[0]
Q_table[ora_ind, unlab_ind] = oracles_score[ora_ind] * rx[unlab_ind] / max(oracles_cost, 0.0001)
return Q_table, self._oracle_ind_name_dict
class QueryNoisyOraclesSelectInstanceUncertainty(BaseNoisyOracleQuery, metaclass=ABCMeta):
"""This class implement select and select_by_prediction_mat by uncertainty."""
def __init__(self, X=None, y=None, oracles=None):
super(QueryNoisyOraclesSelectInstanceUncertainty, self).__init__(X=X, y=y, oracles=oracles)
def select(self, label_index, unlabel_index, model=None, **kwargs):
"""Select an instance and a batch of oracles to label it.
The instance is selected by uncertainty, the oracles is
selected by the difference between their
labeling results and the majority vote results.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
Returns
-------
selected_instance: int
The index of selected instance. Selected by uncertainty.
selected_oracles: list
The selected oracles for querying.
"""
if model is None:
model = LogisticRegression(solver='liblinear')
model.fit(self.X[label_index], self.y[label_index])
pred_unlab, _ = _get_proba_pred(self.X[unlabel_index], model)
return self.select_by_prediction_mat(label_index, unlabel_index, pred_unlab)
def select_by_prediction_mat(self, label_index, unlabel_index, predict):
"""Query from oracles. Return the index of selected instance and oracle.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
predict: : 2d array, shape [n_samples, n_classes]
The probabilistic prediction matrix for the unlabeled set.
Returns
-------
selected_instance: int
The index of selected instance. Selected by uncertainty.
selected_oracles: list
The selected oracles for querying.
"""
# Check parameter and initialize variables
assert (isinstance(unlabel_index, collections.Iterable))
assert (isinstance(label_index, collections.Iterable))
unlabel_index = np.asarray(unlabel_index)
label_index = np.asarray(label_index)
if len(unlabel_index) <= 1:
return unlabel_index
# select instance and oracle
unc = QueryInstanceUncertainty(measure='least_confident')
selected_instance = unc.select_by_prediction_mat(unlabel_index=unlabel_index, predict=predict, batch_size=1)[0]
return [selected_instance], self.select_by_given_instance(selected_instance)
@abstractmethod
def select_by_given_instance(self, selected_instance):
pass
class QueryNoisyOraclesIEthresh(QueryNoisyOraclesSelectInstanceUncertainty):
"""IEthresh will select a batch of oracles to label the selected instance.
It will score for each oracle according to the difference between their
labeling results and the majority vote results.
At each iteration, a batch of oracles whose scores are larger than a threshold will be selected.
Oracle with a higher score is more likely to be selected.
Parameters
----------
X: 2D array, optional (default=None)
Feature matrix of the whole dataset. It is a reference which will not use additional memory.
y: array-like, optional (default=None)
Label matrix of the whole dataset. It is a reference which will not use additional memory.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
initial_labeled_indexes: {list, np.ndarray, IndexCollection}
The indexes of initially labeled samples. Used for initializing the scores of each oracle.
epsilon: float, optional (default=0.1)
The value to determine how many oracles will be selected.
S_t = {a|UI(a) >= epsilon * max UI(a)}
References
----------
[1] Donmez P , Carbonell J G , Schneider J . Efficiently learning the accuracy of labeling
sources for selective sampling.[C] ACM SIGKDD International Conference on
Knowledge Discovery & Data Mining. ACM, 2009.
"""
def __init__(self, X, y, oracles, initial_labeled_indexes, **kwargs):
super(QueryNoisyOraclesIEthresh, self).__init__(X, y, oracles=oracles)
self._ini_ind = np.asarray(initial_labeled_indexes)
# record the labeling history of each oracle
self._oracles_history = dict()
for i in range(len(self._oracles_iterset)):
self._oracles_history[i] = dict()
# record the results of majority vote
self._majority_vote_results = dict()
# calc initial QI(a) for each oracle a
self._UI = np.ones(len(self._oracles_iterset))
self.epsilon = kwargs.pop('epsilon', 0.8)
# MASKED: _calc_uia function (lines 498-531)
def select_by_given_instance(self, selected_instance):
"""Select oracle to query by providing the index of selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
Returns
-------
selected_oracles: list
The selected oracles for querying.
"""
selected_oracles = np.nonzero(self._UI >= self.epsilon * np.max(self._UI))
selected_oracles = selected_oracles[0]
# update UI(a) for each selected oracle
labeling_results = []
for i in selected_oracles:
lab, _ = self._oracles[self._oracle_ind_name_dict[i]].query_by_index(selected_instance)
labeling_results.append(lab[0])
self._oracles_history[i][selected_instance] = copy.copy(lab[0])
_, majority_vote_result = majority_vote(labeling_results)
reward_arr = np.zeros(len(selected_oracles))
same_ind = np.nonzero(labeling_results == majority_vote_result)[0]
reward_arr[same_ind] = 1
self._majority_vote_results[selected_instance] = majority_vote_result
for i in selected_oracles:
self._UI[i] = self._calc_uia(self._oracles_history[i], self._majority_vote_results)
# return results
return [self._oracle_ind_name_dict[i] for i in selected_oracles]
class QueryNoisyOraclesAll(QueryNoisyOraclesSelectInstanceUncertainty):
"""This strategy will select instance by uncertainty and query from all
oracles and return the majority vote result.
Parameters
----------
X: 2D array, optional (default=None)
Feature matrix of the whole dataset. It is a reference which will not use additional memory.
y: array-like, optional (default=None)
Label matrix of the whole dataset. It is a reference which will not use additional memory.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
"""
def __init__(self, oracles, X=None, y=None):
super(QueryNoisyOraclesAll, self).__init__(X=X, y=y, oracles=oracles)
def select_by_given_instance(self, selected_instance):
"""Select oracle to query by providing the index of selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
Returns
-------
oracles_ind: list
The indexes of selected oracles.
"""
return self._oracle_ind_name_dict.values()
class QueryNoisyOraclesRandom(QueryNoisyOraclesSelectInstanceUncertainty):
"""Select a random oracle to query."""
def select_by_given_instance(self, selected_instance):
"""Select oracle to query by providing the index of selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
Returns
-------
oracles_ind: list
The indexes of selected oracles.
"""
return [self._oracle_ind_name_dict[np.random.randint(0, len(self._oracles), 1)[0]]]
|
    def _calc_uia(self, oracle_history, majority_vote_result, alpha=0.05):
        """Calculate the UI(a) by providing the labeling history and the majority vote results.
        Parameters
        ----------
        oracle_history: dict
            The labeling history of an oracle. The key is the index of the instance,
            the value is the label given by the oracle.
        majority_vote_result: dict
            The majority vote results of the instances. The key is the index of the
            instance, the value is the majority-voted label.
        alpha: float, optional (default=0.05)
            Used for calculating the critical value for the Student's t-distribution
            with n-1 degrees of freedom at the alpha/2 confidence level.
        Returns
        -------
        uia: float
            The UI(a) value.
        """
        n = len(self._oracles_iterset)
        t_crit_val = scipy.stats.t.isf([alpha / 2], n - 1)[0]
        reward_arr = []
        for ind in oracle_history.keys():
            if oracle_history[ind] == majority_vote_result[ind]:
                reward_arr.append(1)
            else:
                reward_arr.append(0)
        mean_a = np.mean(reward_arr)
        std_a = np.std(reward_arr)
        uia = mean_a + t_crit_val * std_a / np.sqrt(n)
        return uia
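    # Illustrative note (not in the original source): the quantity above is the
    # upper end of a confidence interval on the oracle's agreement with the
    # majority vote,
    #     UI(a) = mean(rewards) + t_(alpha/2, n-1) * std(rewards) / sqrt(n)
    # where a reward is 1 when the oracle matched the majority vote and 0
    # otherwise, and n is len(self._oracles_iterset) in this implementation.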
| 498 | 531 |
"""
Pre-defined query strategy for noisy oracles.
In reality, the labels given by humans are not always correct. On the one hand,
some unavoidable noise comes from the instrumentation of the experimental
setting. On the other hand, people can become distracted or fatigued over time,
introducing variability in the quality of their annotations.
ALiPy implements several strategies for noisy-oracle settings.
Some of them mainly evaluate the quality or expertise of each oracle,
and the rest try to obtain the accurate label for each instance
whose labels are provided by several noisy oracles.
There are 2 categories of methods.
1. Query from a single selected oracle.
    1.1 Always query from the best oracle.
    1.2 Query from the most appropriate oracle
        according to the selected instance and label.
2. Query from multiple noisy oracles. Labels are obtained from multiple noisy
   oracles, and the algorithm tries to obtain the accurate label for each instance.
Implemented methods:
1: CEAL (IJCAI'17)
2: IEthresh (KDD'09 Donmez)
Baselines:
Majority vote
Query from all oracles and majority vote
Random select an oracle
"""
# Authors: Ying-Peng Tang
# License: BSD 3 clause
from __future__ import division
import collections
from abc import ABCMeta, abstractmethod
import copy
import numpy as np
import scipy.stats
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import NearestNeighbors
from .base import BaseNoisyOracleQuery
from .query_labels import QueryInstanceUncertainty
from .query_labels import _get_proba_pred
from ..oracle import Oracles, Oracle
def majority_vote(labels, weight=None):
"""Perform majority vote to determine the true label from
multiple noisy oracles.
Parameters
----------
labels: list
A list with length=k, which contains the labels provided by
k noisy oracles.
weight: list, optional (default=None)
The weights of each oracle. It should have the same length with
labels.
Returns
-------
vote_count: int
The number of votes.
vote_result: object
The label of the selected_instance, produced by majority voting
of the selected oracles.
"""
oracle_weight = np.ones(len(labels)) if weight is None else weight
assert len(labels) == len(oracle_weight)
vote_result = collections.Counter(labels)
most_votes = vote_result.most_common(n=1)
return most_votes[0][1], most_votes[0][0]
def get_query_results(selected_instance, oracles, names=None):
"""Get the query results from oracles of the selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
names: list, optional (default=None)
A list of str which contains the names of oracles to query from.
If not provided, it will query from all oracles.
Each name should in oracles.names().
Returns
-------
query_labels: list
The queried labels.
query_costs: list
The total cost of query.
"""
costs = []
if isinstance(oracles, list):
oracle_type = 'list'
for oracle in oracles:
assert isinstance(oracle, Oracle)
elif isinstance(oracles, Oracles):
oracle_type = 'oracles'
else:
raise TypeError("The type of parameter oracles must be a list or alipy.oracle.Oracles object.")
labeling_results = []
if oracle_type == 'list':
for i in oracles.names() if oracle_type == 'oracles' else range(len(oracles)):
lab, co = oracles[i].query_by_index(selected_instance)
labeling_results.append(lab[0])
costs.append(np.sum(co))
else:
results = oracles.query_from_s(selected_instance, oracles_name=names)
labeling_results = [res[0][0] for res in results]
costs = [np.sum(res[1]) for res in results]
return labeling_results, costs
def get_majority_vote(selected_instance, oracles, names=None):
"""Get the majority vote results of the selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
names: list, optional (default=None)
A list of str which contains the names of oracles to query from.
If not provided, it will query from all oracles.
Each name should in oracles.names().
Returns
-------
vote_count: int
The number of votes.
vote_result: object
The label of the selected_instance, produced by majority voting
of the selected oracles.
query_costs: int
The total cost of query.
"""
labeling_results, cost = get_query_results(selected_instance, oracles, names)
majority_vote_result = majority_vote(labeling_results)
return majority_vote_result[0], majority_vote_result[1], np.sum(cost)
class QueryNoisyOraclesCEAL(BaseNoisyOracleQuery):
"""Cost-Effective Active Learning from Diverse Labelers (CEAL) method assumes
that different oracles have different expertise. Even the very noisy oracle
may perform well on some kind of examples. The cost of a labeler is proportional
to its overall labeling quality and it is thus necessary to query from the right oracle
according to the selected instance.
This method will select an instance-labeler pair (x, a), and queries the label of x
from a, where the selection of both the instance and labeler is based on a
evaluation function Q(x, a).
The selection of instance is depend on its uncertainty. The selection of oracle is
depend on the oracle's performance on the nearest neighbors of selected instance.
The cost of each oracle is proportional to its overall labeling quality.
Parameters
----------
X: 2D array, optional (default=None)
Feature matrix of the whole dataset. It is a reference which will not use additional memory.
y: array-like, optional (default=None)
Label matrix of the whole dataset. It is a reference which will not use additional memory.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
initial_labeled_indexes: {list, np.ndarray, IndexCollection}
The indexes of initially labeled samples. Used for initializing the scores of each oracle.
References
----------
[1] Sheng-Jun Huang, Jia-Lve Chen, Xin Mu, Zhi-Hua Zhou. 2017.
Cost-Effective Active Learning from Diverse Labelers. In The
Proceedings of the 26th International Joint Conference
on Artificial Intelligence (IJCAI-17), 1879-1885.
"""
def __init__(self, X, y, oracles, initial_labeled_indexes):
super(QueryNoisyOraclesCEAL, self).__init__(X, y, oracles=oracles)
# ytype = type_of_target(self.y)
# if 'multilabel' in ytype:
# warnings.warn("This query strategy does not support multi-label.",
# category=FunctionWarning)
assert (isinstance(initial_labeled_indexes, collections.Iterable))
self._ini_ind = np.asarray(initial_labeled_indexes)
# construct a nearest neighbor object implemented by scikit-learn
self._nntree = NearestNeighbors(metric='euclidean')
self._nntree.fit(self.X[self._ini_ind])
def select(self, label_index, unlabel_index, eval_cost=False, model=None, **kwargs):
"""Query from oracles. Return the index of selected instance and oracle.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
eval_cost: bool, optional (default=False)
To evaluate the cost of oracles or use the cost provided by oracles.
model: object, optional (default=None)
Current classification model, should have the 'predict_proba' method for probabilistic output.
If not provided, LogisticRegression with default parameters implemented by sklearn will be used.
n_neighbors: int, optional (default=10)
How many neighbors of the selected instance will be used
to evaluate the oracles.
Returns
-------
selected_instance: int
The index of selected instance.
selected_oracle: int or str
The index of selected oracle.
If a list is given, the index of oracle will be returned.
If a Oracles object is given, the oracle name will be returned.
"""
if model is None:
model = LogisticRegression(solver='liblinear')
model.fit(self.X[label_index], self.y[label_index])
pred_unlab, _ = _get_proba_pred(self.X[unlabel_index], model)
n_neighbors = min(kwargs.pop('n_neighbors', 10), len(self._ini_ind) - 1)
return self.select_by_prediction_mat(label_index, unlabel_index, pred_unlab,
n_neighbors=n_neighbors, eval_cost=eval_cost)
def select_by_prediction_mat(self, label_index, unlabel_index, predict, **kwargs):
"""Query from oracles. Return the index of selected instance and oracle.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
predict: : 2d array, shape [n_samples, n_classes]
The probabilistic prediction matrix for the unlabeled set.
n_neighbors: int, optional (default=10)
How many neighbors of the selected instance will be used
to evaluate the oracles.
eval_cost: bool, optional (default=False)
To evaluate the cost of oracles or use the cost provided by oracles.
Returns
-------
selected_instance: int
The index of selected instance.
selected_oracle: int or str
The index of selected oracle.
If a list is given, the index of oracle will be returned.
If a Oracles object is given, the oracle name will be returned.
"""
n_neighbors = min(kwargs.pop('n_neighbors', 10), len(self._ini_ind)-1)
        eval_cost = kwargs.pop('eval_cost', False)
Q_table, oracle_ind_name_dict = self._calc_Q_table(label_index, unlabel_index, self._oracles, predict,
n_neighbors=n_neighbors, eval_cost=eval_cost)
# get the instance-oracle pair
selected_pair = np.unravel_index(np.argmax(Q_table, axis=None), Q_table.shape)
sel_ora = oracle_ind_name_dict[selected_pair[0]]
if not isinstance(sel_ora, list):
sel_ora = [sel_ora]
return [unlabel_index[selected_pair[1]]], sel_ora
def _calc_Q_table(self, label_index, unlabel_index, oracles, pred_unlab, n_neighbors=10, eval_cost=False):
"""Query from oracles. Return the Q table and the oracle name/index of each row of Q_table.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
predict: : 2d array, shape [n_samples, n_classes]
The probabilistic prediction matrix for the unlabeled set.
n_neighbors: int, optional (default=10)
How many neighbors of the selected instance will be used
to evaluate the oracles.
eval_cost: bool, optional (default=False)
To evaluate the cost of oracles or use the cost provided by oracles.
Returns
-------
Q_table: 2D array
The Q table.
oracle_ind_name_dict: dict
The oracle name/index of each row of Q_table.
"""
# Check parameter and initialize variables
if self.X is None or self.y is None:
raise Exception('Data matrix is not provided, use select_by_prediction_mat() instead.')
assert (isinstance(unlabel_index, collections.Iterable))
assert (isinstance(label_index, collections.Iterable))
unlabel_index = np.asarray(unlabel_index)
label_index = np.asarray(label_index)
num_of_neighbors = n_neighbors
if len(unlabel_index) <= 1:
return unlabel_index
Q_table = np.zeros((len(oracles), len(unlabel_index))) # row:oracle, col:ins
spv = np.shape(pred_unlab)
# calc least_confident
rx = np.partition(pred_unlab, spv[1] - 1, axis=1)
rx = 1 - rx[:, spv[1] - 1]
for unlab_ind, unlab_ins_ind in enumerate(unlabel_index):
# evaluate oracles for each instance
nn_dist, nn_of_selected_ins = self._nntree.kneighbors(X=self.X[unlab_ins_ind].reshape(1, -1),
n_neighbors=num_of_neighbors,
return_distance=True)
nn_dist = nn_dist[0]
nn_of_selected_ins = nn_of_selected_ins[0]
nn_of_selected_ins = self._ini_ind[nn_of_selected_ins] # map to the original population
oracles_score = []
for ora_ind, ora_name in enumerate(self._oracles_iterset):
# calc q_i(x), expertise of this instance
oracle = oracles[ora_name]
labels, cost = oracle.query_by_index(nn_of_selected_ins)
oracles_score.append(sum([nn_dist[i] * (labels[i] == self.y[nn_of_selected_ins[i]]) for i in
range(num_of_neighbors)]) / num_of_neighbors)
# calc c_i, cost of each labeler
labels, cost = oracle.query_by_index(label_index)
if eval_cost:
oracles_cost = sum([labels[i] == self.y[label_index[i]] for i in range(len(label_index))]) / len(label_index)
else:
oracles_cost = cost[0]
Q_table[ora_ind, unlab_ind] = oracles_score[ora_ind] * rx[unlab_ind] / max(oracles_cost, 0.0001)
return Q_table, self._oracle_ind_name_dict
class QueryNoisyOraclesSelectInstanceUncertainty(BaseNoisyOracleQuery, metaclass=ABCMeta):
"""This class implement select and select_by_prediction_mat by uncertainty."""
def __init__(self, X=None, y=None, oracles=None):
super(QueryNoisyOraclesSelectInstanceUncertainty, self).__init__(X=X, y=y, oracles=oracles)
def select(self, label_index, unlabel_index, model=None, **kwargs):
"""Select an instance and a batch of oracles to label it.
The instance is selected by uncertainty, the oracles is
selected by the difference between their
labeling results and the majority vote results.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
Returns
-------
selected_instance: int
The index of selected instance. Selected by uncertainty.
selected_oracles: list
The selected oracles for querying.
"""
if model is None:
model = LogisticRegression(solver='liblinear')
model.fit(self.X[label_index], self.y[label_index])
pred_unlab, _ = _get_proba_pred(self.X[unlabel_index], model)
return self.select_by_prediction_mat(label_index, unlabel_index, pred_unlab)
def select_by_prediction_mat(self, label_index, unlabel_index, predict):
"""Query from oracles. Return the index of selected instance and oracle.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
predict: : 2d array, shape [n_samples, n_classes]
The probabilistic prediction matrix for the unlabeled set.
Returns
-------
selected_instance: int
The index of selected instance. Selected by uncertainty.
selected_oracles: list
The selected oracles for querying.
"""
# Check parameter and initialize variables
assert (isinstance(unlabel_index, collections.Iterable))
assert (isinstance(label_index, collections.Iterable))
unlabel_index = np.asarray(unlabel_index)
label_index = np.asarray(label_index)
if len(unlabel_index) <= 1:
return unlabel_index
# select instance and oracle
unc = QueryInstanceUncertainty(measure='least_confident')
selected_instance = unc.select_by_prediction_mat(unlabel_index=unlabel_index, predict=predict, batch_size=1)[0]
return [selected_instance], self.select_by_given_instance(selected_instance)
@abstractmethod
def select_by_given_instance(self, selected_instance):
pass
class QueryNoisyOraclesIEthresh(QueryNoisyOraclesSelectInstanceUncertainty):
"""IEthresh will select a batch of oracles to label the selected instance.
It will score for each oracle according to the difference between their
labeling results and the majority vote results.
At each iteration, a batch of oracles whose scores are larger than a threshold will be selected.
Oracle with a higher score is more likely to be selected.
Parameters
----------
X: 2D array, optional (default=None)
Feature matrix of the whole dataset. It is a reference which will not use additional memory.
y: array-like, optional (default=None)
Label matrix of the whole dataset. It is a reference which will not use additional memory.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
initial_labeled_indexes: {list, np.ndarray, IndexCollection}
The indexes of initially labeled samples. Used for initializing the scores of each oracle.
epsilon: float, optional (default=0.1)
The value to determine how many oracles will be selected.
S_t = {a|UI(a) >= epsilon * max UI(a)}
References
----------
[1] Donmez P , Carbonell J G , Schneider J . Efficiently learning the accuracy of labeling
sources for selective sampling.[C] ACM SIGKDD International Conference on
Knowledge Discovery & Data Mining. ACM, 2009.
"""
def __init__(self, X, y, oracles, initial_labeled_indexes, **kwargs):
super(QueryNoisyOraclesIEthresh, self).__init__(X, y, oracles=oracles)
self._ini_ind = np.asarray(initial_labeled_indexes)
# record the labeling history of each oracle
self._oracles_history = dict()
for i in range(len(self._oracles_iterset)):
self._oracles_history[i] = dict()
# record the results of majority vote
self._majority_vote_results = dict()
# calc initial QI(a) for each oracle a
self._UI = np.ones(len(self._oracles_iterset))
self.epsilon = kwargs.pop('epsilon', 0.8)
def _calc_uia(self, oracle_history, majority_vote_result, alpha=0.05):
"""Calculate the UI(a) by providing the labeling history and the majority vote results.
Parameters
----------
oracle_history: dict
The labeling history of an oracle. The key is the index of instance, the value is the
label given by the oracle.
majority_vote_result: dict
The results of majority vote of instances. The key is the index of instance,
the value is the label given by the oracle.
alpha: float, optional (default=0.05)
Used for calculating the critical value for the Student’s t-distribution with n−1
degrees of freedom at the alpha/2 confidence level.
Returns
-------
uia: float
The UI(a) value.
"""
n = len(self._oracles_iterset)
t_crit_val = scipy.stats.t.isf([alpha / 2], n - 1)[0]
reward_arr = []
for ind in oracle_history.keys():
if oracle_history[ind] == majority_vote_result[ind]:
reward_arr.append(1)
else:
reward_arr.append(0)
mean_a = np.mean(reward_arr)
std_a = np.std(reward_arr)
uia = mean_a + t_crit_val * std_a / np.sqrt(n)
return uia
def select_by_given_instance(self, selected_instance):
"""Select oracle to query by providing the index of selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
Returns
-------
selected_oracles: list
The selected oracles for querying.
"""
selected_oracles = np.nonzero(self._UI >= self.epsilon * np.max(self._UI))
selected_oracles = selected_oracles[0]
# update UI(a) for each selected oracle
labeling_results = []
for i in selected_oracles:
lab, _ = self._oracles[self._oracle_ind_name_dict[i]].query_by_index(selected_instance)
labeling_results.append(lab[0])
self._oracles_history[i][selected_instance] = copy.copy(lab[0])
_, majority_vote_result = majority_vote(labeling_results)
reward_arr = np.zeros(len(selected_oracles))
same_ind = np.nonzero(labeling_results == majority_vote_result)[0]
reward_arr[same_ind] = 1
self._majority_vote_results[selected_instance] = majority_vote_result
for i in selected_oracles:
self._UI[i] = self._calc_uia(self._oracles_history[i], self._majority_vote_results)
# return results
return [self._oracle_ind_name_dict[i] for i in selected_oracles]
class QueryNoisyOraclesAll(QueryNoisyOraclesSelectInstanceUncertainty):
"""This strategy will select instance by uncertainty and query from all
oracles and return the majority vote result.
Parameters
----------
X: 2D array, optional (default=None)
Feature matrix of the whole dataset. It is a reference which will not use additional memory.
y: array-like, optional (default=None)
Label matrix of the whole dataset. It is a reference which will not use additional memory.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
"""
def __init__(self, oracles, X=None, y=None):
super(QueryNoisyOraclesAll, self).__init__(X=X, y=y, oracles=oracles)
def select_by_given_instance(self, selected_instance):
"""Select oracle to query by providing the index of selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
Returns
-------
oracles_ind: list
The indexes of selected oracles.
"""
return self._oracle_ind_name_dict.values()
class QueryNoisyOraclesRandom(QueryNoisyOraclesSelectInstanceUncertainty):
"""Select a random oracle to query."""
def select_by_given_instance(self, selected_instance):
"""Select oracle to query by providing the index of selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
Returns
-------
oracles_ind: list
The indexes of selected oracles.
"""
return [self._oracle_ind_name_dict[np.random.randint(0, len(self._oracles), 1)[0]]]
|
add_node
|
add node method. Runs basic validation before adding.
:param dict node: dictionary of node's data
|
"""
Copyright 2019 by Adam Lewicki
This file is part of the Game Theory library,
and is released under the "MIT License Agreement". Please see the LICENSE
file that should have been included as part of this package.
"""
import json
# ======================================================================================================================
# game tree object
class GameTree:
# ---------------------------------- OBJECT PROPERTIES -------------------------------------------------------------
# procedure of printing object properties
def __repr__(self):
""" return tree as JSON serialized dictionary """
return self.pretty_print(self.__dict__)
@staticmethod
def pretty_print(dictionary: dict):
""" return pretty printed dictionary as JSON serialized object """
return json.dumps(dictionary, indent=4)
# initialize object
def __init__(self, nodes: dict = None, groups: dict = None, leafs: list = None, players_list: list = None):
"""
GameTree class used to represent game tree:
Attributes
----------
nodes : dict
dictionary of nodes;
groups : dict
dictionary of groups
leafs : list
list of leafs, calculated on demand
players_list: list
list of player names, indicating which payoff in a node's value list belongs to which player
"""
'''
dictionary of nodes:
Attributes
----------
node : dict
dictionary representing node;
Attributes
----------
value : float
value of node (the prize for reaching the node)
parents : dict
parents of node - can be multiple, represented by dict of ids and connection values
children : dict
children of node - can be multiple, represented by dict of ids and connection values
probability : float
probability of node - 1 means there is no random choice
branch : dict
totals of branch, to avoid tree walking
Attributes
----------
value : float
total value of branch
probability : float
probability of reaching this node in game
'''
# remember to add new attributes to add_node method default values setting
self._nodes = {}
# dictionary of knowledge groups
self._groups = {} if groups is None else groups
# dictionary of leafs
self._leafs = [] if leafs is None else leafs
self._players_list = [] if players_list is None else players_list
# always add root
self.add_node({
'id': 'root',
'player': '1',
}) if nodes is None else nodes
# ---------------------------------- NODES -------------------------------------------------------------------------
# MASKED: add_node function (lines 84-139)
def add_vertex(self, id_: str, player: str, parents: dict):
"""
add vertex from simplified function:
:param str id_: id of the node
:param str player: id of player owning the node
:param dict parents: dictionary of parents for the node
"""
self.add_node({
'id': id_,
'player': player,
'parents': parents
})
def add_leaf(self, id_: str, value: list, parents: dict):
"""
add leaf from simplified function:
:param str id_: id of the node
:param list value: list of node's values
:param dict parents: dictionary of parents for the node
"""
self.add_node({
'id': id_,
'value': value,
'parents': parents
})
def copy_node(self, from_: str, to_: str):
"""
create a copy of node's properties in another node
:param str from_: origin node of properties
:param str to_: destination node for properties
"""
self._nodes[to_] = dict(self._nodes[from_])
def change_node(self, node: dict):
"""
change node method. Changes attributes provided in node dictionary
:param dict node: dictionary of node's data
"""
# check if it is not overriding existing node
if node.get('id') is not None:
if node['id'] not in self._nodes:
raise ValueError('tried to change non-existing node %s' % node['id'])
else:
raise ValueError('no id for node provided')
# change attributes
id_ = node['id']
del node['id']
for attribute in node:
self._nodes[id_][attribute] = node[attribute]
# ---------------------------------- OBJECT BASIC METHODS ----------------------------------------------------------
def get_parent(self, id_) -> str:
""" get id of the parent node """
return list(self._nodes[id_]['parents'].keys())[0]
def get_player_index(self, id_) -> int:
""" return player index from players list order """
return self._players_list.index(self._nodes[id_]['player'])
def get_path_to_node(self, id_: str, mode: str = 'nodes') -> list:
"""
get path from root to the node
:param str id_: id of the node you want to reach from root
:param str mode: return format; 'nodes' - build the path from node ids, 'moves' - build the path from player choices
"""
path_t = []
node = id_
while node != 'root':
if mode == 'nodes':
path_t.insert(0, node)
elif mode == 'moves':
parent_ = self.get_parent(node)
path_t.insert(0, self._nodes[parent_]['children'][node])
else:
raise ValueError('mode variable is not "nodes" nor "moves"')
node = self.get_parent(node)
if mode == 'nodes':
path_t.insert(0, 'root')
return path_t
@staticmethod
def _get_key(obj: dict, val: str) -> list:
"""
get list of keys with specified value from obj dictionary
:param dict obj: chosen dictionary
:param str val: specified value
"""
sublist = [key for (key, value) in obj.items() if value == val]
if sublist:
return sublist
else:
raise ValueError('key with value %s does not exist in %s' % (val, obj))
def get_tree(self) -> dict:
""" return copy of tree nodes structure dict"""
return dict(self._nodes)
# -------------- LEAFS -------------
def calculate_leafs(self):
""" calculate inner list of leafs ids """
self._leafs = [node for node in self._nodes if not self._nodes[node]['children']]
def get_leafs(self) -> list:
""" return list of leafs ids. Will return empty list, if calculate_leafs() has not been called earlier. """
return self._leafs[:]
# -------------- GROUPS ------------
def set_group(self, id_: str, player: str, group: list):
"""
add list of ids to new group
:param str id_: id of group
:param str player: id of player owning the group
:param list group: list of node ids you want to create the group with
"""
self._groups[id_] = {
'player': player,
'group': group
}
def get_groups(self) -> dict:
""" return dictionary of groups """
return dict(self._groups)
def get_groups_of_player(self, player: str) -> list:
""" return list of all groups id's where player is the owner """
return [group for group in self._groups if self._groups[group]['player'] == player]
# ==================================================================================================================
|
def add_node(self, node: dict):
"""
add node method. Runs basic validation before adding.
:param dict node: dictionary of node's data
"""
# check if it is not overriding existing node
if node.get('id') is not None:
if node['id'] in self._nodes:
raise ValueError('tried to override node %s' % node['id'])
else:
raise ValueError('no id for node provided')
# append node to list
id_ = node['id']
del node['id']
# set default values for node
# remember to add new attributes here and in __init__ root node
node['player'] = '0' if node.get('player') is None else node['player']
node['value'] = [0, 0] if node.get('value') is None else node['value']
node['parents'] = {} if node.get('parents') is None else node['parents']
node['children'] = {} if node.get('children') is None else node['children']
node['probability'] = 1 if node.get('probability') is None else node['probability']
node['branch'] = {} if node.get('branch') is None else node['branch']
node['branch']['probability'] = 1 \
if node['branch'].get('probability') is None else node['branch']['probability']
# add player to the list of players if he is not there already
if node['player'] not in self._players_list:
self._players_list.append(node['player'])
# add parenthood
for parent in node['parents']:
# noinspection PyTypeChecker
self._nodes[parent]['children'][id_] = str(node['parents'][parent])
# set depth to one more than first parent
if node['parents']:
node['depth'] = self._nodes[str(list(node['parents'].keys())[0])]['depth'] + 1
else:
node['depth'] = 0 if node.get('depth') is None else node['depth']
# calculate total probability of node:
# total probability equals sum of probabilities of parents multiplied by probability of node
branch_probability = 0
for parent in node['parents']:
branch_probability += self._nodes[parent]['branch']['probability']
# keep the default branch probability (1) for the root node; overwriting it with 0 would zero out the whole tree
if node['parents']:
node['branch']['probability'] = branch_probability * node['probability']
# validate against the error of node not being connected to the rest of the tree via parents removal:
if id_ != 'root' and not node['parents']:
raise ValueError('node [%s] is not connected to the tree - parents are empty' % id_)
# add node
self._nodes[id_] = node
| 84 | 139 |
"""
Copyright 2019 by Adam Lewicki
This file is part of the Game Theory library,
and is released under the "MIT License Agreement". Please see the LICENSE
file that should have been included as part of this package.
"""
import json
# ======================================================================================================================
# game tree object
class GameTree:
# ---------------------------------- OBJECT PROPERTIES -------------------------------------------------------------
# procedure of printing object properties
def __repr__(self):
""" return tree as JSON serialized dictionary """
return self.pretty_print(self.__dict__)
@staticmethod
def pretty_print(dictionary: dict):
""" return pretty printed dictionary as JSON serialized object """
return json.dumps(dictionary, indent=4)
# initialize object
def __init__(self, nodes: dict = None, groups: dict = None, leafs: list = None, players_list: list = None):
"""
GameTree class used to represent game tree:
Attributes
----------
nodes : dict
dictionary of nodes;
groups : dict
dictionary of groups
leafs : list
list of leafs, calculated on demand
players_list: list
list of player names, indicating which payoff in a node's value list belongs to which player
"""
'''
dictionary of nodes:
Attributes
----------
node : dict
dictionary representing node;
Attributes
----------
value : float
value of node (the prize for reaching the node)
parents : dict
parents of node - can be multiple, represented by dict of ids and connection values
children : dict
children of node - can be multiple, represented by dict of ids and connection values
probability : float
probability of node - 1 means there is no random choice
branch : dict
totals of branch, to avoid tree walking
Attributes
----------
value : float
total value of branch
probability : float
probability of reaching this node in game
'''
# remember to add new attributes to add_node method default values setting
self._nodes = {}
# dictionary of knowledge groups
self._groups = {} if groups is None else groups
# dictionary of leafs
self._leafs = [] if leafs is None else leafs
self._players_list = [] if players_list is None else players_list
# always add root
self.add_node({
'id': 'root',
'player': '1',
}) if nodes is None else nodes
# ---------------------------------- NODES -------------------------------------------------------------------------
def add_node(self, node: dict):
"""
add node method. Runs basic validation before adding.
:param dict node: dictionary of node's data
"""
# check if it is not overriding existing node
if node.get('id') is not None:
if node['id'] in self._nodes:
raise ValueError('tried to override node %s' % node['id'])
else:
raise ValueError('no id for node provided')
# append node to list
id_ = node['id']
del node['id']
# set default values for node
# remember to add new attributes here and in __init__ root node
node['player'] = '0' if node.get('player') is None else node['player']
node['value'] = [0, 0] if node.get('value') is None else node['value']
node['parents'] = {} if node.get('parents') is None else node['parents']
node['children'] = {} if node.get('children') is None else node['children']
node['probability'] = 1 if node.get('probability') is None else node['probability']
node['branch'] = {} if node.get('branch') is None else node['branch']
node['branch']['probability'] = 1 \
if node['branch'].get('probability') is None else node['branch']['probability']
# add player to the list of players if he is not there already
if node['player'] not in self._players_list:
self._players_list.append(node['player'])
# add parenthood
for parent in node['parents']:
# noinspection PyTypeChecker
self._nodes[parent]['children'][id_] = str(node['parents'][parent])
# set depth to one more than first parent
if node['parents']:
node['depth'] = self._nodes[str(list(node['parents'].keys())[0])]['depth'] + 1
else:
node['depth'] = 0 if node.get('depth') is None else node['depth']
# calculate total probability of node:
# total probability equals sum of probabilities of parents multiplied by probability of node
branch_probability = 0
for parent in node['parents']:
branch_probability += self._nodes[parent]['branch']['probability']
# keep the default branch probability (1) for the root node; overwriting it with 0 would zero out the whole tree
if node['parents']:
node['branch']['probability'] = branch_probability * node['probability']
# validate against the error of node not being connected to the rest of the tree via parents removal:
if id_ != 'root' and not node['parents']:
raise ValueError('node [%s] is not connected to the tree - parents are empty' % id_)
# add node
self._nodes[id_] = node
def add_vertex(self, id_: str, player: str, parents: dict):
"""
add vertex from simplified function:
:param str id_: id of the node
:param str player: id of player owning the node
:param dict parents: dictionary of parents for the node
"""
self.add_node({
'id': id_,
'player': player,
'parents': parents
})
def add_leaf(self, id_: str, value: list, parents: dict):
"""
add leaf from simplified function:
:param str id_: id of the node
:param list value: list of node's values
:param dict parents: dictionary of parents for the node
"""
self.add_node({
'id': id_,
'value': value,
'parents': parents
})
def copy_node(self, from_: str, to_: str):
"""
create a copy of node's properties in another node
:param str from_: origin node of properties
:param str to_: destination node for properties
"""
self._nodes[to_] = dict(self._nodes[from_])
def change_node(self, node: dict):
"""
change node method. Changes attributes provided in node dictionary
:param dict node: dictionary of node's data
"""
# check if it is not overriding existing node
if node.get('id') is not None:
if node['id'] not in self._nodes:
raise ValueError('tried to change non-existing node %s' % node['id'])
else:
raise ValueError('no id for node provided')
# change attributes
id_ = node['id']
del node['id']
for attribute in node:
self._nodes[id_][attribute] = node[attribute]
# ---------------------------------- OBJECT BASIC METHODS ----------------------------------------------------------
def get_parent(self, id_) -> str:
""" get id of the parent node """
return list(self._nodes[id_]['parents'].keys())[0]
def get_player_index(self, id_) -> int:
""" return player index from players list order """
return self._players_list.index(self._nodes[id_]['player'])
def get_path_to_node(self, id_: str, mode: str = 'nodes') -> list:
"""
get path from root to the node
:param str id_: id of the node you want to reach from root
:param str mode: return format; 'nodes' - build the path from node ids, 'moves' - build the path from player choices
"""
path_t = []
node = id_
while node != 'root':
if mode == 'nodes':
path_t.insert(0, node)
elif mode == 'moves':
parent_ = self.get_parent(node)
path_t.insert(0, self._nodes[parent_]['children'][node])
else:
raise ValueError('mode variable is not "nodes" nor "moves"')
node = self.get_parent(node)
if mode == 'nodes':
path_t.insert(0, 'root')
return path_t
@staticmethod
def _get_key(obj: dict, val: str) -> list:
"""
get list of keys with specified value from obj dictionary
:param dict obj: chosen dictionary
:param str val: specified value
"""
sublist = [key for (key, value) in obj.items() if value == val]
if sublist:
return sublist
else:
raise ValueError('key with value %s does not exist in %s' % (val, obj))
def get_tree(self) -> dict:
""" return copy of tree nodes structure dict"""
return dict(self._nodes)
# -------------- LEAFS -------------
def calculate_leafs(self):
""" calculate inner list of leafs ids """
self._leafs = [node for node in self._nodes if not self._nodes[node]['children']]
def get_leafs(self) -> list:
""" return list of leafs ids. Will return empty list, if calculate_leafs() has not been called earlier. """
return self._leafs[:]
# -------------- GROUPS ------------
def set_group(self, id_: str, player: str, group: list):
"""
add list of ids to new group
:param str id_: id of group
:param str player: id of player owning the group
:param list group: list of node ids you want to create the group with
"""
self._groups[id_] = {
'player': player,
'group': group
}
def get_groups(self) -> dict:
""" return dictionary of groups """
return dict(self._groups)
def get_groups_of_player(self, player: str) -> list:
""" return list of all groups id's where player is the owner """
return [group for group in self._groups if self._groups[group]['player'] == player]
# ==================================================================================================================
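A minimal usage sketch of the GameTree API defined above; node ids, moves and payoffs are illustrative values, not part of the library.
tree = GameTree()                           # 'root' (player '1') is created automatically
tree.add_vertex('A', '2', {'root': 'L'})    # decision node of player '2', reached from root by move 'L'
tree.add_leaf('A1', [3, 1], {'A': 'l'})     # terminal nodes with payoff lists (order follows the players list)
tree.add_leaf('A2', [0, 2], {'A': 'r'})
tree.add_leaf('B', [1, 1], {'root': 'R'})
tree.calculate_leafs()
print(tree.get_leafs())                     # ['A1', 'A2', 'B']
print(tree.get_path_to_node('A1', 'moves')) # ['L', 'l']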
|
get_ann_info
|
Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
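A hypothetical call sketch (assuming a CocoDatasetCar instance named dataset has already been constructed from the file below):
ann = dataset.get_ann_info(0)
print(ann['bboxes'].shape, ann['labels'])  # per _parse_ann_info: 'bboxes', 'labels', 'bboxes_ignore', 'masks', 'seg_map'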
|
import itertools
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
try:
import pycocotools
assert pycocotools.__version__ >= '12.0.2'
except AssertionError:
raise AssertionError('Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
@DATASETS.register_module()
class CocoDatasetCar(CustomDataset):
CLASSES = ('small ship', 'small car', 'bus', 'truck', 'train')
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
return data_infos
# MASKED: get_ann_info function (lines 51-62)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
img_info (dict): Meta information of the image, e.g. its width and height.
ann_info (list[dict]): Annotation info of the image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporary directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float], optional): IoU threshold used for
evaluating recalls/mAPs. If set to a list, the average of all
IoUs will also be computed. If not specified, [0.50, 0.55,
0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
Default: None.
metric_items (list[str] | str, optional): Metric items that will
be returned. If not specified, ``['AR@100', 'AR@300',
'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
``metric=='bbox' or metric=='segm'``.
Returns:
dict[str, float]: COCO style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = {}
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
'The testing results of the whole dataset are empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = cocoEval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
|
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
| 51 | 62 |
import itertools
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
try:
import pycocotools
assert pycocotools.__version__ >= '12.0.2'
except AssertionError:
raise AssertionError('Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
@DATASETS.register_module()
class CocoDatasetCar(CustomDataset):
CLASSES = ('small ship', 'small car', 'bus', 'truck', 'train')
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
img_info (dict): Meta information of the image, e.g. its width and height.
ann_info (list[dict]): Annotation info of the image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporary directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float], optional): IoU threshold used for
evaluating recalls/mAPs. If set to a list, the average of all
IoUs will also be computed. If not specified, [0.50, 0.55,
0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
Default: None.
metric_items (list[str] | str, optional): Metric items that will
be returned. If not specified, ``['AR@100', 'AR@300',
'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
``metric=='bbox' or metric=='segm'``.
Returns:
dict[str, float]: COCO style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = {}
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
'The testing results of the whole dataset are empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = cocoEval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
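The box bookkeeping in _parse_ann_info and xyxy2xywh above reduces to two small conversions. The standalone sketch below (plain Python with illustrative values) mirrors the xywh validity check applied when parsing annotations and the xyxy -> xywh conversion applied when dumping results; the helper names are made up for illustration.
# Standalone sketch of the box conversions used above (illustrative values only).
def xywh_to_xyxy_if_valid(bbox, img_w, img_h):
    # mirrors _parse_ann_info: drop boxes that do not overlap the image or are degenerate
    x1, y1, w, h = bbox
    inter_w = max(0, min(x1 + w, img_w) - max(x1, 0))
    inter_h = max(0, min(y1 + h, img_h) - max(y1, 0))
    if inter_w * inter_h == 0 or w < 1 or h < 1:
        return None
    return [x1, y1, x1 + w, y1 + h]

def xyxy_to_xywh(bbox):
    # same conversion as CocoDatasetCar.xyxy2xywh, used for COCO-style json dumps
    x1, y1, x2, y2 = bbox
    return [x1, y1, x2 - x1, y2 - y1]

print(xywh_to_xyxy_if_valid([10, 20, 30, 40], img_w=100, img_h=100))  # [10, 20, 40, 60]
print(xyxy_to_xywh([10, 20, 40, 60]))                                 # [10, 20, 30, 40]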
|
get_cat_ids
|
Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
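A hypothetical call sketch (assuming a constructed CocoDatasetCar instance named dataset, as above):
cat_ids = dataset.get_cat_ids(0)  # raw COCO category ids for every annotation in image 0 (repeats possible)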
|
import itertools
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
try:
import pycocotools
assert pycocotools.__version__ >= '12.0.2'
except AssertionError:
raise AssertionError('Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
@DATASETS.register_module()
class CocoDatasetCar(CustomDataset):
CLASSES = ('small ship', 'small car', 'bus', 'truck', 'train')
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
# MASKED: get_cat_ids function (lines 64-75)
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
img_info (dict): Meta information of the image, e.g. its width and height.
ann_info (list[dict]): Annotation info of the image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporary directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float], optional): IoU threshold used for
evaluating recalls/mAPs. If set to a list, the average of all
IoUs will also be computed. If not specified, [0.50, 0.55,
0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
Default: None.
metric_items (list[str] | str, optional): Metric items that will
be returned. If not specified, ``['AR@100', 'AR@300',
'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
``metric=='bbox' or metric=='segm'``.
Returns:
dict[str, float]: COCO style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
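# i.e. the standard COCO thresholds [0.50, 0.55, ..., 0.95] (10 values in steps of 0.05)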
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = {}
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
'The testing results of the whole dataset are empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = cocoEval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
|
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
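# Illustrative use (hypothetical dataset instance and index, not from the source):
# cats_in_image = dataset.get_cat_ids(0)  # COCO category ids present in the first image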
| 64 | 75 |
import itertools
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
try:
import pycocotools
assert pycocotools.__version__ >= '12.0.2'
except AssertionError:
raise AssertionError('Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
@DATASETS.register_module()
class CocoDatasetCar(CustomDataset):
CLASSES = ('small ship', 'small car', 'bus', 'truck', 'train')
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
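# maps the (possibly non-contiguous) COCO category ids to contiguous label indices 0..num_classes-1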
self.img_ids = self.coco.get_img_ids()
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
img_info (dict): Information of the image, e.g. its width and height.
ann_info (list[dict]): Annotation info of an image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
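# note: str.replace substitutes every 'jpg' occurrence, which is fine for the usual '<name>.jpg' filenames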
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
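# Worked example: an ``xyxy`` box [10, 20, 50, 80] becomes ``xywh`` [10, 20, 40, 60]
# (width = 50 - 10 = 40, height = 80 - 20 = 60).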
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporary directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float], optional): IoU threshold used for
evaluating recalls/mAPs. If set to a list, the average of all
IoUs will also be computed. If not specified, [0.50, 0.55,
0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
Default: None.
metric_items (list[str] | str, optional): Metric items that will
be returned. If not specified, ``['AR@100', 'AR@300',
'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
``metric=='bbox' or metric=='segm'``.
Returns:
dict[str, float]: COCO style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = {}
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
'The testing results of the whole dataset are empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = cocoEval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
|
results2json
|
Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and values are corresponding filenames.
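A minimal usage sketch of this method follows (the annotation path, pipeline, cached results file and output prefix are hypothetical placeholders, not taken from the source):
import mmcv
# Hypothetical setup: a CocoDatasetCar instance and cached per-image results.
dataset = CocoDatasetCar(ann_file='data/annotations/val.json', pipeline=[])
results = mmcv.load('results.pkl')  # list with one detection result per image
result_files = dataset.results2json(results, outfile_prefix='work_dirs/val')
# For bbox-style results this writes 'work_dirs/val.bbox.json' and returns
# {'bbox': 'work_dirs/val.bbox.json', 'proposal': 'work_dirs/val.bbox.json'}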
|
import itertools
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
try:
import pycocotools
assert pycocotools.__version__ >= '12.0.2'
except AssertionError:
raise AssertionError('Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
@DATASETS.register_module()
class CocoDatasetCar(CustomDataset):
CLASSES = ('small ship', 'small car', 'bus', 'truck', 'train')
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
img_info (dict): Information of the image, e.g. its width and height.
ann_info (list[dict]): Annotation info of an image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
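# proposals are class-agnostic, so a fixed placeholder category id is used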
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
# MASKED: results2json function (lines 246-281)
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporary directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float], optional): IoU threshold used for
evaluating recalls/mAPs. If set to a list, the average of all
IoUs will also be computed. If not specified, [0.50, 0.55,
0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
Default: None.
metric_items (list[str] | str, optional): Metric items that will
be returned. If not specified, ``['AR@100', 'AR@300',
'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
``metric=='bbox' or metric=='segm'``.
Returns:
dict[str, float]: COCO style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = {}
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
'The testing results of the whole dataset are empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
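# useCats = 0 makes COCOeval ignore category labels, i.e. class-agnostic proposal evaluation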
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = cocoEval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
|
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
| 246 | 281 |
import itertools
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
try:
import pycocotools
assert pycocotools.__version__ >= '12.0.2'
except AssertionError:
raise AssertionError('Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
@DATASETS.register_module()
class CocoDatasetCar(CustomDataset):
CLASSES = ('small ship', 'small car', 'bus', 'truck', 'train')
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
img_info (dict): Information of the image, e.g. its width and height.
ann_info (list[dict]): Annotation info of an image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporary directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float], optional): IoU threshold used for
evaluating recalls/mAPs. If set to a list, the average of all
IoUs will also be computed. If not specified, [0.50, 0.55,
0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
Default: None.
metric_items (list[str] | str, optional): Metric items that will
be returned. If not specified, ``['AR@100', 'AR@300',
'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
``metric=='bbox' or metric=='segm'``.
Returns:
dict[str, float]: COCO style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = {}
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
'The testing results of the whole dataset are empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = cocoEval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
|
format_results
|
Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), where result_files is a dict containing the json filepaths and tmp_dir is the temporary directory created for saving json files when jsonfile_prefix is not specified.
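A minimal sketch of the intended call pattern (``dataset`` and ``results`` are assumed to exist as in the earlier sketch; nothing here is taken verbatim from the source):
# When jsonfile_prefix is omitted, a TemporaryDirectory is created internally
# and should be cleaned up by the caller once the json files are no longer needed.
result_files, tmp_dir = dataset.format_results(results)
print(result_files.get('bbox'))
if tmp_dir is not None:
    tmp_dir.cleanup()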
|
import itertools
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
try:
import pycocotools
assert pycocotools.__version__ >= '12.0.2'
except AssertionError:
raise AssertionError('Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
@DATASETS.register_module()
class CocoDatasetCar(CustomDataset):
CLASSES = ('small ship', 'small car', 'bus', 'truck', 'train')
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
img_info (dict): Information of the image, e.g. its width and height.
ann_info (list[dict]): Annotation info of an image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
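# Illustrative example (not in the original file): for bbox = np.array([10., 20., 50., 80.]),
# xyxy2xywh returns [10.0, 20.0, 40.0, 60.0], i.e. [x, y, width, height] as COCO expects.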
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
# MASKED: format_results function (lines 307-331)
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float], optional): IoU threshold used for
evaluating recalls/mAPs. If set to a list, the average of all
IoUs will also be computed. If not specified, [0.50, 0.55,
0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
Default: None.
metric_items (list[str] | str, optional): Metric items that will
be returned. If not specified, ``['AR@100', 'AR@300',
'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
``metric=='bbox' or metric=='segm'``.
Returns:
dict[str, float]: COCO style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = {}
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
'The testing results of the whole dataset are empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = cocoEval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
|
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporary directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
| 307 | 331 |
import itertools
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
try:
import pycocotools
assert pycocotools.__version__ >= '12.0.2'
except AssertionError:
raise AssertionError('Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
@DATASETS.register_module()
class CocoDatasetCar(CustomDataset):
CLASSES = ('small ship', 'small car', 'bus', 'truck', 'train')
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
img_info (dict): Information of the corresponding image.
ann_info (list[dict]): Annotation info of an image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
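# Illustrative example (not in the original file): for bbox = np.array([10., 20., 50., 80.]),
# xyxy2xywh returns [10.0, 20.0, 40.0, 60.0], i.e. [x, y, width, height] as COCO expects.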
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporary directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float], optional): IoU threshold used for
evaluating recalls/mAPs. If set to a list, the average of all
IoUs will also be computed. If not specified, [0.50, 0.55,
0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
Default: None.
metric_items (list[str] | str, optional): Metric items that will
be returned. If not specified, ``['AR@100', 'AR@300',
'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
``metric=='bbox' or metric=='segm'``.
Returns:
dict[str, float]: COCO style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = {}
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
'The testing results of the whole dataset are empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = cocoEval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
|
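A minimal usage sketch for the evaluation path above (illustrative, not part of the dataset row): `dataset` is assumed to be a constructed CocoDatasetCar instance and `results` a per-image list of per-class (N, 5) arrays in [x1, y1, x2, y2, score] order, as consumed by _det2json.
# Dump raw detections to COCO-style json files, then run the COCO protocol evaluation.
result_files, tmp_dir = dataset.format_results(results, jsonfile_prefix='work_dirs/car')
metrics = dataset.evaluate(results, metric='bbox', classwise=True)
print(metrics['bbox_mAP'], metrics['bbox_mAP_50'])
if tmp_dir is not None:  # only set when jsonfile_prefix was left as None
    tmp_dir.cleanup()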
startup
|
Start up Django and Lino.
Optional `settings_module` is the name of a Django settings
module. If this is specified, set the
:envvar:`DJANGO_SETTINGS_MODULE` environment variable.
This is called automatically when a process is invoked by an
*admin command*.
In a document to be tested using :cmd:`doctest` you need to call
it manually using e.g.:
>>> import lino
>>> lino.startup('my.project.settings')
The above two lines are recommended over the old-style method (the
only one available until Django 1.6)::
>>> import os
>>> os.environ['DJANGO_SETTINGS_MODULE'] = 'my.project.settings'
|
# -*- coding: UTF-8 -*-
# Copyright 2002-2019 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""
See :ref:`lino` for non-technical documentation.
The :mod:`lino` package itself is the first plugin for all Lino
applications, added automatically to your :setting:`INSTALLED_APPS`. It defines
no models, but some template files, django admin commands, translation messages
and the core :xfile:`help_texts.py` file.
The :mod:`lino` package is the root for the subpackages that define core
functionalities:
.. autosummary::
:toctree:
core
hello
api
utils
mixins
projects
modlib
sphinxcontrib
management.commands
"""
# from __future__ import unicode_literals
# from __future__ import absolute_import
# from builtins import str
import sys
import os
from os.path import join, dirname
from .setup_info import SETUP_INFO
__version__ = SETUP_INFO['version']
intersphinx_urls = dict(docs="http://core.lino-framework.org")
srcref_url = 'https://github.com/lino-framework/lino/blob/master/%s'
# srcref_url = 'https://github.com/lino-framework/lino/tree/master/%s'
doc_trees = ['docs']
if sys.version_info[0] > 2:
PYAFTER26 = True
elif sys.version_info[0] == 2 and sys.version_info[1] > 6:
PYAFTER26 = True
else:
PYAFTER26 = False
import warnings
warnings.filterwarnings(
"error", "DateTimeField .* received a naive datetime (.*) while time zone support is active.",
RuntimeWarning, "django.db.models.fields")
from django.conf import settings
from django.apps import AppConfig
# def setup_project(settings_module):
# os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
# from lino.api.shell import settings
DJANGO_DEFAULT_LANGUAGE = 'en-us'
def assert_django_code(django_code):
if '_' in django_code:
raise Exception("Invalid language code %r. "
"Use values like 'en' or 'en-us'." % django_code)
from django import VERSION
AFTER17 = True
AFTER18 = True
DJANGO2 = True
if VERSION[0] == 1:
DJANGO2 = False
if VERSION[1] < 10:
raise Exception("Unsupported Django version %s" % VERSION)
# if VERSION[1] > 6:
# AFTER17 = True
# if VERSION[1] > 8:
# AFTER18 = True
elif VERSION[0] == 2:
AFTER17 = True
AFTER18 = True
else:
raise Exception("Unsupported Django version %s" % VERSION)
# MASKED: startup function (lines 98-125)
class AppConfig(AppConfig):
"""This is the only :class:`django.apps.AppConfig` object used by
Lino.
Lino applications use the :class:`lino.core.plugins.Plugin`
because it has some additional functionality.
"""
name = 'lino'
def ready(self):
if False:
settings.SITE.startup()
else:
try:
settings.SITE.startup()
except ImportError as e:
import traceback
# traceback.print_exc(e)
# sys.exit(-1)
raise Exception("ImportError during startup:\n" +
traceback.format_exc())
except Exception as e:
print(e)
raise
default_app_config = 'lino.AppConfig'
# deprecated use, only for backwards compat:
from django.utils.translation import ugettext_lazy as _
|
def startup(settings_module=None):
"""
Start up Django and Lino.
Optional `settings_module` is the name of a Django settings
module. If this is specified, set the
:envvar:`DJANGO_SETTINGS_MODULE` environment variable.
This is called automatically when a process is invoked by an
*admin command*.
In a document to be tested using :cmd:`doctest` you need to call
it manually using e.g.:
>>> import lino
>>> lino.startup('my.project.settings')
The above two lines are recommended over the old-style method (the
only one available until Django 1.6)::
>>> import os
>>> os.environ['DJANGO_SETTINGS_MODULE'] = 'my.project.settings'
"""
if settings_module:
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
import django
django.setup()
| 98 | 125 |
# -*- coding: UTF-8 -*-
# Copyright 2002-2019 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""
See :ref:`lino` for non-technical documentation.
The :mod:`lino` package itself is the first plugin for all Lino
applications, added automatically to your :setting:`INSTALLED_APPS`. It defines
no models, but some template files, django admin commands, translation messages
and the core :xfile:`help_texts.py` file.
The :mod:`lino` package is the root for the subpackages that define core
functionalities:
.. autosummary::
:toctree:
core
hello
api
utils
mixins
projects
modlib
sphinxcontrib
management.commands
"""
# from __future__ import unicode_literals
# from __future__ import absolute_import
# from builtins import str
import sys
import os
from os.path import join, dirname
from .setup_info import SETUP_INFO
__version__ = SETUP_INFO['version']
intersphinx_urls = dict(docs="http://core.lino-framework.org")
srcref_url = 'https://github.com/lino-framework/lino/blob/master/%s'
# srcref_url = 'https://github.com/lino-framework/lino/tree/master/%s'
doc_trees = ['docs']
if sys.version_info[0] > 2:
PYAFTER26 = True
elif sys.version_info[0] == 2 and sys.version_info[1] > 6:
PYAFTER26 = True
else:
PYAFTER26 = False
import warnings
warnings.filterwarnings(
"error", "DateTimeField .* received a naive datetime (.*) while time zone support is active.",
RuntimeWarning, "django.db.models.fields")
from django.conf import settings
from django.apps import AppConfig
# def setup_project(settings_module):
# os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
# from lino.api.shell import settings
DJANGO_DEFAULT_LANGUAGE = 'en-us'
def assert_django_code(django_code):
if '_' in django_code:
raise Exception("Invalid language code %r. "
"Use values like 'en' or 'en-us'." % django_code)
from django import VERSION
AFTER17 = True
AFTER18 = True
DJANGO2 = True
if VERSION[0] == 1:
DJANGO2 = False
if VERSION[1] < 10:
raise Exception("Unsupported Django version %s" % VERSION)
# if VERSION[1] > 6:
# AFTER17 = True
# if VERSION[1] > 8:
# AFTER18 = True
elif VERSION[0] == 2:
AFTER17 = True
AFTER18 = True
else:
raise Exception("Unsupported Django version %s" % VERSION)
def startup(settings_module=None):
"""
Start up Django and Lino.
Optional `settings_module` is the name of a Django settings
module. If this is specified, set the
:envvar:`DJANGO_SETTINGS_MODULE` environment variable.
This is called automatically when a process is invoked by an
*admin command*.
In a document to be tested using :cmd:`doctest` you need to call
it manually using e.g.:
>>> import lino
>>> lino.startup('my.project.settings')
The above two lines are recommended over the old-style method (the
only one available until Django 1.6)::
>>> import os
>>> os.environ['DJANGO_SETTINGS_MODULE'] = 'my.project.settings'
"""
if settings_module:
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
import django
django.setup()
class AppConfig(AppConfig):
"""This is the only :class:`django.apps.AppConfig` object used by
Lino.
Lino applications use the :class:`lino.core.plugins.Plugin`
because it has some additional functionality.
"""
name = 'lino'
def ready(self):
if False:
settings.SITE.startup()
else:
try:
settings.SITE.startup()
except ImportError as e:
import traceback
# traceback.print_exc(e)
# sys.exit(-1)
raise Exception("ImportError during startup:\n" +
traceback.format_exc())
except Exception as e:
print(e)
raise
default_app_config = 'lino.AppConfig'
# deprecated use, only for backwards compat:
from django.utils.translation import ugettext_lazy as _
|
last_executed_query
|
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
|
import decimal
from threading import local
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import util
from django.utils import datetime_safe
from django.utils.importlib import import_module
class BaseDatabaseWrapper(local):
"""
Represents a database connection.
"""
ops = None
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
def __eq__(self, other):
return self.settings_dict == other.settings_dict
def __ne__(self, other):
return not self == other
def _commit(self):
if self.connection is not None:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
return self.connection.rollback()
def _enter_transaction_management(self, managed):
"""
A hook for backend-specific changes required when entering manual
transaction handling.
"""
pass
def _leave_transaction_management(self, managed):
"""
A hook for backend-specific changes required when leaving manual
transaction handling. Will usually be implemented only when
_enter_transaction_management() is also required.
"""
pass
def _savepoint(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def close(self):
if self.connection is not None:
self.connection.close()
self.connection = None
def cursor(self):
from django.conf import settings
cursor = self._cursor()
if settings.DEBUG:
return self.make_debug_cursor(cursor)
return cursor
def make_debug_cursor(self, cursor):
return util.CursorDebugWrapper(cursor, self)
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
interprets_empty_strings_as_nulls = False
can_use_chunked_reads = True
can_return_id_from_insert = False
uses_autocommit = False
uses_savepoints = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self):
self._cache = {}
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a DATE object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
# MASKED: last_executed_query function (lines 196-215)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if compiler_name not in self._cache:
self._cache[compiler_name] = getattr(
import_module(self.compiler_module), compiler_name
)
return self._cache[compiler_name]
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
raise NotImplementedError
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
raise NotImplementedError
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
raise NotImplementedError
def sql_flush(self, style, tables, sequences):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
raise NotImplementedError()
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import smart_unicode
return smart_unicode(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return datetime_safe.new_date(value).strftime('%Y-%m-%d')
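# Illustrative: value_to_db_date(datetime.date(2009, 12, 31)) returns '2009-12-31'.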
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_time(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds(self, value):
"""
Returns a two-element list with the lower and upper bounds to be used
with a BETWEEN operator to query a field value using a year lookup.
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
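# Illustrative: year_lookup_bounds(2006) returns
# ['2006-01-01 00:00:00', '2006-12-31 23:59:59.999999'].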
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-element list with the lower and upper bounds to be used
with a BETWEEN operator to query a DateField value using a year lookup.
`value` is an int, containing the looked-up year.
By default, it just calls `self.year_lookup_bounds`. Some backends need
this hook because on their DB date fields can't be compared to values
which include a time part.
"""
return self.year_lookup_bounds(value)
def convert_values(self, value, field):
"""Coerce the value returned by the database backend into a consistent type that
is compatible with the field type.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return value
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type in ('DateField', 'DateTimeField', 'TimeField'):
return value
# No field, or the field isn't known to be a decimal or integer
# Default to a float
return float(value)
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
This is used on specific backends to rule out aggregates that are
known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplemented.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
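# Illustrative: combine_expression('+', ['price', 'tax']) returns 'price + tax'.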
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def table_names(self):
"Returns a list of names of all tables that exist in the database."
cursor = self.connection.cursor()
return self.get_table_list(cursor)
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models, router
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
if only_existing:
tables = [t for t in tables if self.table_name_converter(t) in self.table_names()]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models, router
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
all_models.append(model)
return set([m for m in all_models
if self.table_name_converter(m._meta.db_table) in map(self.table_name_converter, tables)
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models, router
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
|
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import smart_unicode, force_unicode
# Convert params to contain Unicode values.
to_unicode = lambda s: force_unicode(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple([to_unicode(val) for val in params])
else:
u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
return smart_unicode(sql) % u_params
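# Illustrative: last_executed_query(cursor, 'SELECT * FROM t WHERE id = %s', [42])
# returns u'SELECT * FROM t WHERE id = 42' (params are interpolated for display only).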
| 196 | 215 |
import decimal
from threading import local
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import util
from django.utils import datetime_safe
from django.utils.importlib import import_module
class BaseDatabaseWrapper(local):
"""
Represents a database connection.
"""
ops = None
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
def __eq__(self, other):
return self.settings_dict == other.settings_dict
def __ne__(self, other):
return not self == other
def _commit(self):
if self.connection is not None:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
return self.connection.rollback()
def _enter_transaction_management(self, managed):
"""
A hook for backend-specific changes required when entering manual
transaction handling.
"""
pass
def _leave_transaction_management(self, managed):
"""
A hook for backend-specific changes required when leaving manual
transaction handling. Will usually be implemented only when
_enter_transaction_management() is also required.
"""
pass
def _savepoint(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def close(self):
if self.connection is not None:
self.connection.close()
self.connection = None
def cursor(self):
from django.conf import settings
cursor = self._cursor()
if settings.DEBUG:
return self.make_debug_cursor(cursor)
return cursor
def make_debug_cursor(self, cursor):
return util.CursorDebugWrapper(cursor, self)
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
interprets_empty_strings_as_nulls = False
can_use_chunked_reads = True
can_return_id_from_insert = False
uses_autocommit = False
uses_savepoints = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self):
self._cache = {}
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a DATE object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import smart_unicode, force_unicode
# Convert params to contain Unicode values.
to_unicode = lambda s: force_unicode(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple([to_unicode(val) for val in params])
else:
u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
return smart_unicode(sql) % u_params
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if compiler_name not in self._cache:
self._cache[compiler_name] = getattr(
import_module(self.compiler_module), compiler_name
)
return self._cache[compiler_name]
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
raise NotImplementedError
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
raise NotImplementedError
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
raise NotImplementedError
def sql_flush(self, style, tables, sequences):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
raise NotImplementedError()
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import smart_unicode
return smart_unicode(x).replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return datetime_safe.new_date(value).strftime('%Y-%m-%d')
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_time(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds(self, value):
"""
Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
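# Illustration only (my note, not part of Django): for a lookup on the year 2005 the
# defaults above yield bounds ready for a BETWEEN clause:
#   >>> BaseDatabaseOperations().year_lookup_bounds(2005)
#   ['2005-01-01 00:00:00', '2005-12-31 23:59:59.999999']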
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year lookup
`value` is an int, containing the looked-up year.
By default, it just calls `self.year_lookup_bounds`. Some backends need
this hook because on their DB date fields can't be compared to values
which include a time part.
"""
return self.year_lookup_bounds(value)
def convert_values(self, value, field):
"""Coerce the value returned by the database backend into a consistent type that
is compatible with the field type.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return value
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type in ('DateField', 'DateTimeField', 'TimeField'):
return value
# No field, or the field isn't known to be a decimal or integer
# Default to a float
return float(value)
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
This is used on specific backends to rule out known aggregates
that are known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def table_names(self):
"Returns a list of names of all tables that exist in the database."
cursor = self.connection.cursor()
return self.get_table_list(cursor)
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models, router
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
if only_existing:
tables = [t for t in tables if self.table_name_converter(t) in self.table_names()]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models, router
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
all_models.append(model)
return set([m for m in all_models
if self.table_name_converter(m._meta.db_table) in map(self.table_name_converter, tables)
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models, router
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
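# A minimal, hypothetical sketch (not from Django itself) of how a concrete backend
# fills in two of the hooks documented above; the quoting rule and the LIMIT sentinel
# below are invented purely for illustration.
class ExampleDatabaseOperations(BaseDatabaseOperations):
    def quote_name(self, name):
        # Idempotent quoting, as the base-class docstring requires.
        if name.startswith('"') and name.endswith('"'):
            return name
        return '"%s"' % name
    def no_limit_value(self):
        # Many backends accept a very large LIMIT value to mean "no limit".
        return 2 ** 63 - 1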
|
extract_each_layer
|
This image processing function is designed for OCT image post-processing.
It removes small regions and finds the OCT layer boundary under the specified threshold.
:param image:
:param threshold:
:return:
|
import os
import scipy.misc as misc
import shutil
import cv2
import Constants
import numpy as np
from skimage import morphology
# MASKED: extract_each_layer function (lines 11-45)
if __name__ == '__main__':
image_path = '/home/jimmyliu/Zaiwang/crop-OCT/train/562.fds/crop-images/' \
'oct202.png'
gt_path = '/home/jimmyliu/Zaiwang/crop-OCT/train/562.fds/crop-gt/' \
'oct202.png'
image = cv2.imread(image_path)
gt = cv2.imread(gt_path, cv2.IMREAD_GRAYSCALE)
cv2.imwrite('gt.png', gt)
print(np.max(image), np.shape(image))
print(np.max(gt), np.shape(gt))
|
def extract_each_layer(image, threshold):
"""
This image processing function is designed for OCT image post-processing.
It removes small regions and finds the OCT layer boundary under the specified threshold.
:param image:
:param threshold:
:return:
"""
# convert the output to the binary image
ret, binary = cv2.threshold(image, threshold, 1, cv2.THRESH_BINARY)
bool_binary = np.array(binary, bool)
# remove the small object
remove_binary = morphology.remove_small_objects(bool_binary, min_size=25000,
connectivity=2,
in_place=False)
c = np.multiply(bool_binary, remove_binary)
final_binary = np.zeros(shape=np.shape(binary))
final_binary[c == True] = 1
binary_image = cv2.filter2D(final_binary, -1, np.array([[-1], [1]]))
layer_one = np.zeros(shape=[1, np.shape(binary_image)[1]])
for i in range(np.shape(binary_image)[1]):
location_point = np.where(binary_image[:, i] > 0)[0]
# print(location_point)
if len(location_point) == 1:
layer_one[0, i] = location_point
elif len(location_point) == 0:
layer_one[0, i] = layer_one[0, i-1]
else:
layer_one[0, i] = location_point[0]
return layer_one
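# Minimal usage sketch (my own illustration, not part of the original script): a tiny
# synthetic band image keeps the call self-contained; on real data the input would be a
# grayscale prediction map read with cv2.IMREAD_GRAYSCALE, and min_size=25000 in the
# function assumes full-size OCT scans.
synthetic = np.zeros((300, 400), dtype=np.uint8)
synthetic[100:200, :] = 255  # one bright horizontal band
boundary = extract_each_layer(synthetic, threshold=127)
# boundary has shape (1, 400); every column records row 99, the last background row
# above the band, found by the [-1, 1] vertical difference filter.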
| 11 | 45 |
import os
import scipy.misc as misc
import shutil
import cv2
import Constants
import numpy as np
from skimage import morphology
def extract_each_layer(image, threshold):
"""
This image processing function is designed for OCT image post-processing.
It removes small regions and finds the OCT layer boundary under the specified threshold.
:param image:
:param threshold:
:return:
"""
# convert the output to the binary image
ret, binary = cv2.threshold(image, threshold, 1, cv2.THRESH_BINARY)
bool_binary = np.array(binary, bool)
# remove the small object
remove_binary = morphology.remove_small_objects(bool_binary, min_size=25000,
connectivity=2,
in_place=False)
c = np.multiply(bool_binary, remove_binary)
final_binary = np.zeros(shape=np.shape(binary))
final_binary[c == True] = 1
binary_image = cv2.filter2D(final_binary, -1, np.array([[-1], [1]]))
layer_one = np.zeros(shape=[1, np.shape(binary_image)[1]])
for i in range(np.shape(binary_image)[1]):
location_point = np.where(binary_image[:, i] > 0)[0]
# print(location_point)
if len(location_point) == 1:
layer_one[0, i] = location_point
elif len(location_point) == 0:
layer_one[0, i] = layer_one[0, i-1]
else:
layer_one[0, i] = location_point[0]
return layer_one
if __name__ == '__main__':
image_path = '/home/jimmyliu/Zaiwang/crop-OCT/train/562.fds/crop-images/' \
'oct202.png'
gt_path = '/home/jimmyliu/Zaiwang/crop-OCT/train/562.fds/crop-gt/' \
'oct202.png'
image = cv2.imread(image_path)
gt = cv2.imread(gt_path, cv2.IMREAD_GRAYSCALE)
cv2.imwrite('gt.png', gt)
print(np.max(image), np.shape(image))
print(np.max(gt), np.shape(gt))
|
classification_report
|
Create a report on classification statistics.
Parameters
----------
Y_hat : array-like, shape=(n_samples,)
List of data labels.
Y : array-like, shape=(n_samples,)
List of target truth labels.
beta : float, default=1
Strength of recall relative to precision in the F-score.
Returns
-------
report : dict
Dictionary containing classification statistics in the following
structure:
- 'label': {
'precision':0.5,
'recall':1.0,
'f-score':0.67,
'support':1
},
...
- 'beta': 1,
- 'support': 5,
- 'accuracy': 0.8,
- 'macro avg': {
'precision':0.6,
'recall':0.9,
'f-score':0.67,
},
- 'weighted avg': {
'precision':0.67,
'recall':0.9,
'f-score':0.67,
}
|
"""Classification Report"""
# Authors: Jeffrey Wang
# License: BSD 3 clause
import numpy as np
from sleepens.analysis import multiconfusion_matrix
def calculate_statistics(Y_hat, Y, beta=1, average=None):
"""
Calculate the precisions, recalls, F-beta scores, and
supports for each class in `targets`.
Parameters
----------
Y_hat : array-like, shape=(n_samples,)
List of data labels.
Y : array-like, shape=(n_samples,)
List of target truth labels.
beta : float, default=1
Strength of recall relative to precision in the F-score.
average : {'micro', 'macro', 'weighted', None}, default=None
The type of averaging to perform on statistics. Must be one of:
- None : Do not perform averaging, statistics for each class
are returned.
- 'micro' : Calculate globally, counting total true positives,
false negatives, and false positives.
- 'macro' : Calculate per class an unweighted mean.
- 'weighted' : Calculate per class the mean weighted by support.
Returns
-------
precisions : float or dict
Dictionary of precisions for each class if `average` is None.
Averaged precision based on averaging method if provided.
recalls : float or dict
Dictionary of recalls for each class if `average` is None.
Averaged recall based on averaging method if provided.
fscores : float or dict
Dictionary of fscores for each class if `average` is None.
Averaged fscore based on averaging method if provided.
supports : float or dict
Dictionary of supports for each class if `average` is None.
Total support (number of samples) if an averaging method is provided.
"""
if beta < 0:
raise ValueError("Beta must be non-negative")
matrix = multiconfusion_matrix(Y_hat, Y)
matrix_labels = list(matrix.keys())
matrix = np.array([matrix[l] for l in matrix_labels])
tp_sum = matrix[:,1,1]
label_sum = tp_sum + matrix[:,0,1]
target_sum = tp_sum + matrix[:,1,0]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
label_sum = np.array([label_sum.sum()])
target_sum = np.array([target_sum.sum()])
with np.errstate(divide='ignore', invalid='ignore'):
precisions = np.divide(tp_sum, label_sum,
out=np.zeros(tp_sum.shape, dtype=float),
where=label_sum!=0)
recalls = np.divide(tp_sum, target_sum,
out=np.zeros(tp_sum.shape, dtype=float),
where=target_sum!=0)
if np.isposinf(beta):
fscores = recalls
else:
beta2 = beta ** 2
denom = beta2 * precisions + recalls
valid = np.where(denom != 0)[0]
fscores = np.zeros_like(denom)
fscores[valid] = (1 + beta2) * precisions[valid] * recalls[valid] / denom[valid]
if average == 'weighted':
weights = target_sum
if target_sum.sum() == 0:
return 0, 0, 0, target_sum.sum()
else:
weights = None
if average is not None:
precisions = np.average(precisions, weights=weights)
recalls = np.average(recalls, weights=weights)
fscores = np.average(fscores, weights=weights)
supports = target_sum.sum()
else:
precisions = {matrix_labels[k]: precisions[k] for k in range(len(matrix_labels))}
recalls = {matrix_labels[k]: recalls[k] for k in range(len(matrix_labels))}
fscores = {matrix_labels[k]: fscores[k] for k in range(len(matrix_labels))}
supports = {matrix_labels[k]: target_sum[k] for k in range(len(matrix_labels))}
return precisions, recalls, fscores, supports
# MASKED: classification_report function (lines 98-154)
|
def classification_report(Y_hat, Y, beta=1):
"""
Create a report on classification statistics.
Parameters
----------
Y_hat : array-like, shape=(n_samples,)
List of data labels.
Y : array-like, shape=(n_samples,)
List of target truth labels.
beta : float, default=1
Strength of recall relative to precision in the F-score.
Returns
-------
report : dict
Dictionary containing classification statistics in the following
structure:
- 'label': {
'precision':0.5,
'recall':1.0,
'f-score':0.67,
'support':1
},
...
- 'beta': 1,
- 'support': 5,
- 'accuracy': 0.8,
- 'macro avg': {
'precision':0.6,
'recall':0.9,
'f-score':0.67,
},
- 'weighted avg': {
'precision':0.67,
'recall':0.9,
'f-score':0.67,
}
"""
stats = calculate_statistics(Y_hat, Y, beta=beta)
_, _, accuracy, total = calculate_statistics(Y_hat, Y, beta=beta, average='micro')
macro = calculate_statistics(Y_hat, Y, beta=beta, average='macro')
weighted = calculate_statistics(Y_hat, Y, beta=beta, average='weighted')
h = ['precision', 'recall', 'f-score', 'support']
report = {
'beta': beta,
'support': total,
'accuracy': accuracy,
'macro avg': {h[i]: macro[i] for i in range(len(h))},
'weighted avg': {h[i]: weighted[i] for i in range(len(h))}
}
classes = set(stats[0].keys())
for c in classes:
report[c] = {h[i]: stats[i][c] for i in range(len(h))}
return report
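# Hypothetical usage sketch (mine): it assumes sleepens.analysis.multiconfusion_matrix,
# imported at the top of this module, is available, and that labels are plain class ids.
Y = [0, 0, 1, 1, 2]
Y_hat = [0, 1, 1, 1, 2]
report = classification_report(Y_hat, Y, beta=1)
# report['accuracy'] holds the micro-averaged F-score (equal to accuracy here),
# report['macro avg'] / report['weighted avg'] hold the averaged statistics, and the
# remaining keys are the per-class precision / recall / f-score / support entries.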
| 98 | 154 |
"""Classification Report"""
# Authors: Jeffrey Wang
# License: BSD 3 clause
import numpy as np
from sleepens.analysis import multiconfusion_matrix
def calculate_statistics(Y_hat, Y, beta=1, average=None):
"""
Calculate the precisions, recalls, F-beta scores, and
supports for each class in `targets`.
Parameters
----------
Y_hat : array-like, shape=(n_samples,)
List of data labels.
Y : array-like, shape=(n_samples,)
List of target truth labels.
beta : float, default=1
Strength of recall relative to precision in the F-score.
average : {'micro', 'macro', 'weighted', None}, default=None
The type of averaging to perform on statistics. Must be one of:
- None : Do not perform averaging, statistics for each class
are returned.
- 'micro' : Calculate globally, counting total true positives,
false negatives, and false positives.
- 'macro' : Calculate per class an unweighted mean.
- 'weighted' : Calculate per class the mean weighted by support.
Returns
-------
precisions : float or dict
Dictionary of precisions for each class if `average` is None.
Averaged precision based on averaging method if provided.
recalls : float or dict
Dictionary of recalls for each class if `average` is None.
Averaged recall based on averaging method if provided.
fscores : float or dict
Dictionary of fscores for each class if `average` is None.
Averaged fscore based on averaging method if provided.
supports : float or dict
Dictionary of supports for each class if `average` is None.
Total support (number of samples) if an averaging method is provided.
"""
if beta < 0:
raise ValueError("Beta must be non-negative")
matrix = multiconfusion_matrix(Y_hat, Y)
matrix_labels = list(matrix.keys())
matrix = np.array([matrix[l] for l in matrix_labels])
tp_sum = matrix[:,1,1]
label_sum = tp_sum + matrix[:,0,1]
target_sum = tp_sum + matrix[:,1,0]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
label_sum = np.array([label_sum.sum()])
target_sum = np.array([target_sum.sum()])
with np.errstate(divide='ignore', invalid='ignore'):
precisions = np.divide(tp_sum, label_sum,
out=np.zeros(tp_sum.shape, dtype=float),
where=label_sum!=0)
recalls = np.divide(tp_sum, target_sum,
out=np.zeros(tp_sum.shape, dtype=float),
where=target_sum!=0)
if np.isposinf(beta):
fscores = recalls
else:
beta2 = beta ** 2
denom = beta2 * precisions + recalls
valid = np.where(denom != 0)[0]
fscores = np.zeros_like(denom)
fscores[valid] = (1 + beta2) * precisions[valid] * recalls[valid] / denom[valid]
if average == 'weighted':
weights = target_sum
if target_sum.sum() == 0:
return 0, 0, 0, target_sum.sum()
else:
weights = None
if average is not None:
precisions = np.average(precisions, weights=weights)
recalls = np.average(recalls, weights=weights)
fscores = np.average(fscores, weights=weights)
supports = target_sum.sum()
else:
precisions = {matrix_labels[k]: precisions[k] for k in range(len(matrix_labels))}
recalls = {matrix_labels[k]: recalls[k] for k in range(len(matrix_labels))}
fscores = {matrix_labels[k]: fscores[k] for k in range(len(matrix_labels))}
supports = {matrix_labels[k]: target_sum[k] for k in range(len(matrix_labels))}
return precisions, recalls, fscores, supports
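# Worked check (my own note, for readability) of the F-beta combination above:
# F_beta = (1 + beta**2) * P * R / (beta**2 * P + R), so with P = 0.5, R = 1.0 and
# beta = 1 this gives 2 * 0.5 / 1.5 = 0.67, matching the per-class example values in
# the classification_report docstring below.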
def classification_report(Y_hat, Y, beta=1):
"""
Create a report on classification statistics.
Parameters
----------
Y_hat : array-like, shape=(n_samples,)
List of data labels.
Y : array-like, shape=(n_samples,)
List of target truth labels.
beta : float, default=1
Strength of recall relative to precision in the F-score.
Returns
-------
report : dict
Dictionary containing classification statistics in the following
structure:
- 'label': {
'precision':0.5,
'recall':1.0,
'f-score':0.67,
'support':1
},
...
- 'beta': 1,
- 'support': 5,
- 'accuracy': 0.8,
- 'macro avg': {
'precision':0.6,
'recall':0.9,
'f-score':0.67,
},
- 'weighted avg': {
'precision':0.67,
'recall':0.9,
'f-score':0.67,
}
"""
stats = calculate_statistics(Y_hat, Y, beta=beta)
_, _, accuracy, total = calculate_statistics(Y_hat, Y, beta=beta, average='micro')
macro = calculate_statistics(Y_hat, Y, beta=beta, average='macro')
weighted = calculate_statistics(Y_hat, Y, beta=beta, average='weighted')
h = ['precision', 'recall', 'f-score', 'support']
report = {
'beta': beta,
'support': total,
'accuracy': accuracy,
'macro avg': {h[i]: macro[i] for i in range(len(h))},
'weighted avg': {h[i]: weighted[i] for i in range(len(h))}
}
classes = set(stats[0].keys())
for c in classes:
report[c] = {h[i]: stats[i][c] for i in range(len(h))}
return report
|
max_pool_forward_fast
|
A fast implementation of the forward pass for a max pooling layer.
This chooses between the reshape method and the im2col method. If the pooling
regions are square and tile the input image, then we can use the reshape
method which is very fast. Otherwise we fall back on the im2col method, which
is not much faster than the naive method.
|
import numpy as np
try:
from cs231n.im2col_cython import col2im_cython, im2col_cython
from cs231n.im2col_cython import col2im_6d_cython
except ImportError:
print ('run the following from the cs231n directory and try again:')
print ('python setup.py build_ext --inplace')
print ('You may also need to restart your iPython kernel')
from cs231n.im2col import *
def conv_forward_im2col(x, w, b, conv_param):
"""
A fast implementation of the forward pass for a convolutional layer
based on im2col and col2im.
"""
N, C, H, W = x.shape
num_filters, _, filter_height, filter_width = w.shape
stride, pad = conv_param['stride'], conv_param['pad']
# Check dimensions
assert (W + 2 * pad - filter_width) % stride == 0, 'width does not work'
assert (H + 2 * pad - filter_height) % stride == 0, 'height does not work'
# Create output
out_height = (H + 2 * pad - filter_height) / stride + 1
out_width = (W + 2 * pad - filter_width) / stride + 1
out = np.zeros((N, num_filters, out_height, out_width), dtype=x.dtype)
# x_cols = im2col_indices(x, w.shape[2], w.shape[3], pad, stride)
x_cols = im2col_cython(x, w.shape[2], w.shape[3], pad, stride)
res = w.reshape((w.shape[0], -1)).dot(x_cols) + b.reshape(-1, 1)
out = res.reshape(w.shape[0], out.shape[2], out.shape[3], x.shape[0])
out = out.transpose(3, 0, 1, 2)
cache = (x, w, b, conv_param, x_cols)
return out, cache
def conv_forward_strides(x, w, b, conv_param):
N, C, H, W = x.shape
F, _, HH, WW = w.shape
stride, pad = conv_param['stride'], conv_param['pad']
# Check dimensions
assert (W + 2 * pad - WW) % stride == 0, 'width does not work'
assert (H + 2 * pad - HH) % stride == 0, 'height does not work'
# Pad the input
p = pad
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
# Figure out output dimensions
H += 2 * pad
W += 2 * pad
out_h = (H - HH) / stride + 1
out_w = (W - WW) / stride + 1
# Perform an im2col operation by picking clever strides
shape = (C, HH, WW, N, out_h, out_w)
strides = (H * W, W, 1, C * H * W, stride * W, stride)
strides = x.itemsize * np.array(strides)
x_stride = np.lib.stride_tricks.as_strided(x_padded,
shape=shape, strides=strides)
x_cols = np.ascontiguousarray(x_stride)
x_cols.shape = (C * HH * WW, N * out_h * out_w)
# Now all our convolutions are a big matrix multiply
res = w.reshape(F, -1).dot(x_cols) + b.reshape(-1, 1)
# Reshape the output
res.shape = (F, N, out_h, out_w)
out = res.transpose(1, 0, 2, 3)
# Be nice and return a contiguous array
# The old version of conv_forward_fast doesn't do this, so for a fair
# comparison we won't either
out = np.ascontiguousarray(out)
cache = (x, w, b, conv_param, x_cols)
return out, cache
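# Standalone NumPy sketch (my own, not part of this file) of the as_strided windowing
# idea used above, shown for a single-channel image with stride 1 and no padding.
_tiny = np.arange(16, dtype=np.float64).reshape(4, 4)
_s0, _s1 = _tiny.strides
_windows = np.lib.stride_tricks.as_strided(
    _tiny, shape=(2, 2, 3, 3), strides=(_s0, _s1, _s0, _s1))
# _windows[i, j] is the 3x3 patch whose top-left corner sits at (i, j); flattening each
# patch gives the per-position columns that the big matrix multiply above consumes.
assert np.allclose(_windows[1, 1], _tiny[1:4, 1:4])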
def conv_backward_strides(dout, cache):
x, w, b, conv_param, x_cols = cache
stride, pad = conv_param['stride'], conv_param['pad']
N, C, H, W = x.shape
F, _, HH, WW = w.shape
_, _, out_h, out_w = dout.shape
db = np.sum(dout, axis=(0, 2, 3))
dout_reshaped = dout.transpose(1, 0, 2, 3).reshape(F, -1)
dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)
dx_cols = w.reshape(F, -1).T.dot(dout_reshaped)
dx_cols.shape = (C, HH, WW, N, out_h, out_w)
dx = col2im_6d_cython(dx_cols, N, C, H, W, HH, WW, pad, stride)
return dx, dw, db
def conv_backward_im2col(dout, cache):
"""
A fast implementation of the backward pass for a convolutional layer
based on im2col and col2im.
"""
x, w, b, conv_param, x_cols = cache
stride, pad = conv_param['stride'], conv_param['pad']
db = np.sum(dout, axis=(0, 2, 3))
num_filters, _, filter_height, filter_width = w.shape
dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(num_filters, -1)
dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)
dx_cols = w.reshape(num_filters, -1).T.dot(dout_reshaped)
# dx = col2im_indices(dx_cols, x.shape, filter_height, filter_width, pad, stride)
dx = col2im_cython(dx_cols, x.shape[0], x.shape[1], x.shape[2], x.shape[3],
filter_height, filter_width, pad, stride)
return dx, dw, db
conv_forward_fast = conv_forward_strides
conv_backward_fast = conv_backward_strides
# MASKED: max_pool_forward_fast function (lines 132-153)
def max_pool_backward_fast(dout, cache):
"""
A fast implementation of the backward pass for a max pooling layer.
This switches between the reshape method and the im2col method depending on
which method was used to generate the cache.
"""
method, real_cache = cache
if method == 'reshape':
return max_pool_backward_reshape(dout, real_cache)
elif method == 'im2col':
return max_pool_backward_im2col(dout, real_cache)
else:
raise ValueError('Unrecognized method "%s"' % method)
def max_pool_forward_reshape(x, pool_param):
"""
A fast implementation of the forward pass for the max pooling layer that uses
some clever reshaping.
This can only be used for square pooling regions that tile the input.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
assert pool_height == pool_width == stride, 'Invalid pool params'
assert H % pool_height == 0
assert W % pool_height == 0
x_reshaped = x.reshape(N, C, H / pool_height, pool_height,
W / pool_width, pool_width)
out = x_reshaped.max(axis=3).max(axis=4)
cache = (x, x_reshaped, out)
return out, cache
def max_pool_backward_reshape(dout, cache):
"""
A fast implementation of the backward pass for the max pooling layer that
uses some clever broadcasting and reshaping.
This can only be used if the forward pass was computed using
max_pool_forward_reshape.
NOTE: If there are multiple argmaxes, this method will assign gradient to
ALL argmax elements of the input rather than picking one. In this case the
gradient will actually be incorrect. However this is unlikely to occur in
practice, so it shouldn't matter much. One possible solution is to split the
upstream gradient equally among all argmax elements; this should result in a
valid subgradient. You can make this happen by uncommenting the line below;
however this results in a significant performance penalty (about 40% slower)
and is unlikely to matter in practice so we don't do it.
"""
x, x_reshaped, out = cache
dx_reshaped = np.zeros_like(x_reshaped)
out_newaxis = out[:, :, :, np.newaxis, :, np.newaxis]
mask = (x_reshaped == out_newaxis)
dout_newaxis = dout[:, :, :, np.newaxis, :, np.newaxis]
dout_broadcast, _ = np.broadcast_arrays(dout_newaxis, dx_reshaped)
dx_reshaped[mask] = dout_broadcast[mask]
dx_reshaped /= np.sum(mask, axis=(3, 5), keepdims=True)
dx = dx_reshaped.reshape(x.shape)
return dx
def max_pool_forward_im2col(x, pool_param):
"""
An implementation of the forward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
assert (H - pool_height) % stride == 0, 'Invalid height'
assert (W - pool_width) % stride == 0, 'Invalid width'
out_height = (H - pool_height) / stride + 1
out_width = (W - pool_width) / stride + 1
x_split = x.reshape(N * C, 1, H, W)
x_cols = im2col(x_split, pool_height, pool_width, padding=0, stride=stride)
x_cols_argmax = np.argmax(x_cols, axis=0)
x_cols_max = x_cols[x_cols_argmax, np.arange(x_cols.shape[1])]
out = x_cols_max.reshape(out_height, out_width, N, C).transpose(2, 3, 0, 1)
cache = (x, x_cols, x_cols_argmax, pool_param)
return out, cache
def max_pool_backward_im2col(dout, cache):
"""
An implementation of the backward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible.
"""
x, x_cols, x_cols_argmax, pool_param = cache
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
dout_reshaped = dout.transpose(2, 3, 0, 1).flatten()
dx_cols = np.zeros_like(x_cols)
dx_cols[x_cols_argmax, np.arange(dx_cols.shape[1])] = dout_reshaped
dx = col2im_indices(dx_cols, (N * C, 1, H, W), pool_height, pool_width,
padding=0, stride=stride)
dx = dx.reshape(x.shape)
return dx
|
def max_pool_forward_fast(x, pool_param):
"""
A fast implementation of the forward pass for a max pooling layer.
This chooses between the reshape method and the im2col method. If the pooling
regions are square and tile the input image, then we can use the reshape
method which is very fast. Otherwise we fall back on the im2col method, which
is not much faster than the naive method.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
same_size = pool_height == pool_width == stride
tiles = H % pool_height == 0 and W % pool_width == 0
if same_size and tiles:
out, reshape_cache = max_pool_forward_reshape(x, pool_param)
cache = ('reshape', reshape_cache)
else:
out, im2col_cache = max_pool_forward_im2col(x, pool_param)
cache = ('im2col', im2col_cache)
return out, cache
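# Standalone NumPy sketch (my own, independent of this file's helpers) of the reshape
# trick used by the fast path: a 2x2 pool with stride 2 is just a reshape followed by
# two max reductions, mirroring the x_reshaped.max(axis=3).max(axis=4) line in
# max_pool_forward_reshape.
_x = np.arange(2 * 3 * 4 * 4, dtype=np.float64).reshape(2, 3, 4, 4)
_pooled = _x.reshape(2, 3, 2, 2, 2, 2).max(axis=3).max(axis=4)
assert _pooled.shape == (2, 3, 2, 2)
assert _pooled[0, 0, 0, 0] == _x[0, 0, :2, :2].max()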
| 132 | 153 |
import numpy as np
try:
from cs231n.im2col_cython import col2im_cython, im2col_cython
from cs231n.im2col_cython import col2im_6d_cython
except ImportError:
print ('run the following from the cs231n directory and try again:')
print ('python setup.py build_ext --inplace')
print ('You may also need to restart your iPython kernel')
from cs231n.im2col import *
def conv_forward_im2col(x, w, b, conv_param):
"""
A fast implementation of the forward pass for a convolutional layer
based on im2col and col2im.
"""
N, C, H, W = x.shape
num_filters, _, filter_height, filter_width = w.shape
stride, pad = conv_param['stride'], conv_param['pad']
# Check dimensions
assert (W + 2 * pad - filter_width) % stride == 0, 'width does not work'
assert (H + 2 * pad - filter_height) % stride == 0, 'height does not work'
# Create output
out_height = (H + 2 * pad - filter_height) / stride + 1
out_width = (W + 2 * pad - filter_width) / stride + 1
out = np.zeros((N, num_filters, out_height, out_width), dtype=x.dtype)
# x_cols = im2col_indices(x, w.shape[2], w.shape[3], pad, stride)
x_cols = im2col_cython(x, w.shape[2], w.shape[3], pad, stride)
res = w.reshape((w.shape[0], -1)).dot(x_cols) + b.reshape(-1, 1)
out = res.reshape(w.shape[0], out.shape[2], out.shape[3], x.shape[0])
out = out.transpose(3, 0, 1, 2)
cache = (x, w, b, conv_param, x_cols)
return out, cache
def conv_forward_strides(x, w, b, conv_param):
N, C, H, W = x.shape
F, _, HH, WW = w.shape
stride, pad = conv_param['stride'], conv_param['pad']
# Check dimensions
assert (W + 2 * pad - WW) % stride == 0, 'width does not work'
assert (H + 2 * pad - HH) % stride == 0, 'height does not work'
# Pad the input
p = pad
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
# Figure out output dimensions
H += 2 * pad
W += 2 * pad
out_h = (H - HH) / stride + 1
out_w = (W - WW) / stride + 1
# Perform an im2col operation by picking clever strides
shape = (C, HH, WW, N, out_h, out_w)
strides = (H * W, W, 1, C * H * W, stride * W, stride)
strides = x.itemsize * np.array(strides)
x_stride = np.lib.stride_tricks.as_strided(x_padded,
shape=shape, strides=strides)
x_cols = np.ascontiguousarray(x_stride)
x_cols.shape = (C * HH * WW, N * out_h * out_w)
# Now all our convolutions are a big matrix multiply
res = w.reshape(F, -1).dot(x_cols) + b.reshape(-1, 1)
# Reshape the output
res.shape = (F, N, out_h, out_w)
out = res.transpose(1, 0, 2, 3)
# Be nice and return a contiguous array
# The old version of conv_forward_fast doesn't do this, so for a fair
# comparison we won't either
out = np.ascontiguousarray(out)
cache = (x, w, b, conv_param, x_cols)
return out, cache
def conv_backward_strides(dout, cache):
x, w, b, conv_param, x_cols = cache
stride, pad = conv_param['stride'], conv_param['pad']
N, C, H, W = x.shape
F, _, HH, WW = w.shape
_, _, out_h, out_w = dout.shape
db = np.sum(dout, axis=(0, 2, 3))
dout_reshaped = dout.transpose(1, 0, 2, 3).reshape(F, -1)
dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)
dx_cols = w.reshape(F, -1).T.dot(dout_reshaped)
dx_cols.shape = (C, HH, WW, N, out_h, out_w)
dx = col2im_6d_cython(dx_cols, N, C, H, W, HH, WW, pad, stride)
return dx, dw, db
def conv_backward_im2col(dout, cache):
"""
A fast implementation of the backward pass for a convolutional layer
based on im2col and col2im.
"""
x, w, b, conv_param, x_cols = cache
stride, pad = conv_param['stride'], conv_param['pad']
db = np.sum(dout, axis=(0, 2, 3))
num_filters, _, filter_height, filter_width = w.shape
dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(num_filters, -1)
dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)
dx_cols = w.reshape(num_filters, -1).T.dot(dout_reshaped)
# dx = col2im_indices(dx_cols, x.shape, filter_height, filter_width, pad, stride)
dx = col2im_cython(dx_cols, x.shape[0], x.shape[1], x.shape[2], x.shape[3],
filter_height, filter_width, pad, stride)
return dx, dw, db
conv_forward_fast = conv_forward_strides
conv_backward_fast = conv_backward_strides
def max_pool_forward_fast(x, pool_param):
"""
A fast implementation of the forward pass for a max pooling layer.
This chooses between the reshape method and the im2col method. If the pooling
regions are square and tile the input image, then we can use the reshape
method which is very fast. Otherwise we fall back on the im2col method, which
is not much faster than the naive method.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
same_size = pool_height == pool_width == stride
tiles = H % pool_height == 0 and W % pool_width == 0
if same_size and tiles:
out, reshape_cache = max_pool_forward_reshape(x, pool_param)
cache = ('reshape', reshape_cache)
else:
out, im2col_cache = max_pool_forward_im2col(x, pool_param)
cache = ('im2col', im2col_cache)
return out, cache
def max_pool_backward_fast(dout, cache):
"""
A fast implementation of the backward pass for a max pooling layer.
This switches between the reshape method and the im2col method depending on
which method was used to generate the cache.
"""
method, real_cache = cache
if method == 'reshape':
return max_pool_backward_reshape(dout, real_cache)
elif method == 'im2col':
return max_pool_backward_im2col(dout, real_cache)
else:
raise ValueError('Unrecognized method "%s"' % method)
def max_pool_forward_reshape(x, pool_param):
"""
A fast implementation of the forward pass for the max pooling layer that uses
some clever reshaping.
This can only be used for square pooling regions that tile the input.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
assert pool_height == pool_width == stride, 'Invalid pool params'
assert H % pool_height == 0
assert W % pool_height == 0
x_reshaped = x.reshape(N, C, H / pool_height, pool_height,
W / pool_width, pool_width)
out = x_reshaped.max(axis=3).max(axis=4)
cache = (x, x_reshaped, out)
return out, cache
def max_pool_backward_reshape(dout, cache):
"""
A fast implementation of the backward pass for the max pooling layer that
uses some clever broadcasting and reshaping.
This can only be used if the forward pass was computed using
max_pool_forward_reshape.
NOTE: If there are multiple argmaxes, this method will assign gradient to
ALL argmax elements of the input rather than picking one. In this case the
gradient will actually be incorrect. However this is unlikely to occur in
practice, so it shouldn't matter much. One possible solution is to split the
upstream gradient equally among all argmax elements; this should result in a
valid subgradient. You can make this happen by uncommenting the line below;
however this results in a significant performance penalty (about 40% slower)
and is unlikely to matter in practice so we don't do it.
"""
x, x_reshaped, out = cache
dx_reshaped = np.zeros_like(x_reshaped)
out_newaxis = out[:, :, :, np.newaxis, :, np.newaxis]
mask = (x_reshaped == out_newaxis)
dout_newaxis = dout[:, :, :, np.newaxis, :, np.newaxis]
dout_broadcast, _ = np.broadcast_arrays(dout_newaxis, dx_reshaped)
dx_reshaped[mask] = dout_broadcast[mask]
dx_reshaped /= np.sum(mask, axis=(3, 5), keepdims=True)
dx = dx_reshaped.reshape(x.shape)
return dx
def max_pool_forward_im2col(x, pool_param):
"""
An implementation of the forward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
assert (H - pool_height) % stride == 0, 'Invalid height'
assert (W - pool_width) % stride == 0, 'Invalid width'
out_height = (H - pool_height) / stride + 1
out_width = (W - pool_width) / stride + 1
x_split = x.reshape(N * C, 1, H, W)
x_cols = im2col(x_split, pool_height, pool_width, padding=0, stride=stride)
x_cols_argmax = np.argmax(x_cols, axis=0)
x_cols_max = x_cols[x_cols_argmax, np.arange(x_cols.shape[1])]
out = x_cols_max.reshape(out_height, out_width, N, C).transpose(2, 3, 0, 1)
cache = (x, x_cols, x_cols_argmax, pool_param)
return out, cache
def max_pool_backward_im2col(dout, cache):
"""
An implementation of the backward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible.
"""
x, x_cols, x_cols_argmax, pool_param = cache
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
dout_reshaped = dout.transpose(2, 3, 0, 1).flatten()
dx_cols = np.zeros_like(x_cols)
dx_cols[x_cols_argmax, np.arange(dx_cols.shape[1])] = dout_reshaped
dx = col2im_indices(dx_cols, (N * C, 1, H, W), pool_height, pool_width,
padding=0, stride=stride)
dx = dx.reshape(x.shape)
return dx
|
max_pool_backward_reshape
|
A fast implementation of the backward pass for the max pooling layer that
uses some clever broadcasting and reshaping.
This can only be used if the forward pass was computed using
max_pool_forward_reshape.
NOTE: If there are multiple argmaxes, this method will assign gradient to
ALL argmax elements of the input rather than picking one. In this case the
gradient will actually be incorrect. However this is unlikely to occur in
practice, so it shouldn't matter much. One possible solution is to split the
upstream gradient equally among all argmax elements; this should result in a
valid subgradient. You can make this happen by uncommenting the line below;
however this results in a significant performance penalty (about 40% slower)
and is unlikely to matter in practice so we don't do it.
|
import numpy as np
try:
from cs231n.im2col_cython import col2im_cython, im2col_cython
from cs231n.im2col_cython import col2im_6d_cython
except ImportError:
print ('run the following from the cs231n directory and try again:')
print ('python setup.py build_ext --inplace')
print ('You may also need to restart your iPython kernel')
from cs231n.im2col import *
def conv_forward_im2col(x, w, b, conv_param):
"""
A fast implementation of the forward pass for a convolutional layer
based on im2col and col2im.
"""
N, C, H, W = x.shape
num_filters, _, filter_height, filter_width = w.shape
stride, pad = conv_param['stride'], conv_param['pad']
# Check dimensions
assert (W + 2 * pad - filter_width) % stride == 0, 'width does not work'
assert (H + 2 * pad - filter_height) % stride == 0, 'height does not work'
# Create output
out_height = (H + 2 * pad - filter_height) / stride + 1
out_width = (W + 2 * pad - filter_width) / stride + 1
out = np.zeros((N, num_filters, out_height, out_width), dtype=x.dtype)
# x_cols = im2col_indices(x, w.shape[2], w.shape[3], pad, stride)
x_cols = im2col_cython(x, w.shape[2], w.shape[3], pad, stride)
res = w.reshape((w.shape[0], -1)).dot(x_cols) + b.reshape(-1, 1)
out = res.reshape(w.shape[0], out.shape[2], out.shape[3], x.shape[0])
out = out.transpose(3, 0, 1, 2)
cache = (x, w, b, conv_param, x_cols)
return out, cache
def conv_forward_strides(x, w, b, conv_param):
N, C, H, W = x.shape
F, _, HH, WW = w.shape
stride, pad = conv_param['stride'], conv_param['pad']
# Check dimensions
assert (W + 2 * pad - WW) % stride == 0, 'width does not work'
assert (H + 2 * pad - HH) % stride == 0, 'height does not work'
# Pad the input
p = pad
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
# Figure out output dimensions
H += 2 * pad
W += 2 * pad
out_h = (H - HH) / stride + 1
out_w = (W - WW) / stride + 1
# Perform an im2col operation by picking clever strides
shape = (C, HH, WW, N, out_h, out_w)
strides = (H * W, W, 1, C * H * W, stride * W, stride)
strides = x.itemsize * np.array(strides)
x_stride = np.lib.stride_tricks.as_strided(x_padded,
shape=shape, strides=strides)
x_cols = np.ascontiguousarray(x_stride)
x_cols.shape = (C * HH * WW, N * out_h * out_w)
# Now all our convolutions are a big matrix multiply
res = w.reshape(F, -1).dot(x_cols) + b.reshape(-1, 1)
# Reshape the output
res.shape = (F, N, out_h, out_w)
out = res.transpose(1, 0, 2, 3)
# Be nice and return a contiguous array
# The old version of conv_forward_fast doesn't do this, so for a fair
# comparison we won't either
out = np.ascontiguousarray(out)
cache = (x, w, b, conv_param, x_cols)
return out, cache
def conv_backward_strides(dout, cache):
x, w, b, conv_param, x_cols = cache
stride, pad = conv_param['stride'], conv_param['pad']
N, C, H, W = x.shape
F, _, HH, WW = w.shape
_, _, out_h, out_w = dout.shape
db = np.sum(dout, axis=(0, 2, 3))
dout_reshaped = dout.transpose(1, 0, 2, 3).reshape(F, -1)
dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)
dx_cols = w.reshape(F, -1).T.dot(dout_reshaped)
dx_cols.shape = (C, HH, WW, N, out_h, out_w)
dx = col2im_6d_cython(dx_cols, N, C, H, W, HH, WW, pad, stride)
return dx, dw, db
def conv_backward_im2col(dout, cache):
"""
A fast implementation of the backward pass for a convolutional layer
based on im2col and col2im.
"""
x, w, b, conv_param, x_cols = cache
stride, pad = conv_param['stride'], conv_param['pad']
db = np.sum(dout, axis=(0, 2, 3))
num_filters, _, filter_height, filter_width = w.shape
dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(num_filters, -1)
dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)
dx_cols = w.reshape(num_filters, -1).T.dot(dout_reshaped)
# dx = col2im_indices(dx_cols, x.shape, filter_height, filter_width, pad, stride)
dx = col2im_cython(dx_cols, x.shape[0], x.shape[1], x.shape[2], x.shape[3],
filter_height, filter_width, pad, stride)
return dx, dw, db
conv_forward_fast = conv_forward_strides
conv_backward_fast = conv_backward_strides
def max_pool_forward_fast(x, pool_param):
"""
A fast implementation of the forward pass for a max pooling layer.
This chooses between the reshape method and the im2col method. If the pooling
regions are square and tile the input image, then we can use the reshape
method which is very fast. Otherwise we fall back on the im2col method, which
is not much faster than the naive method.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
same_size = pool_height == pool_width == stride
tiles = H % pool_height == 0 and W % pool_width == 0
if same_size and tiles:
out, reshape_cache = max_pool_forward_reshape(x, pool_param)
cache = ('reshape', reshape_cache)
else:
out, im2col_cache = max_pool_forward_im2col(x, pool_param)
cache = ('im2col', im2col_cache)
return out, cache
def max_pool_backward_fast(dout, cache):
"""
A fast implementation of the backward pass for a max pooling layer.
This switches between the reshape method and the im2col method depending on
which method was used to generate the cache.
"""
method, real_cache = cache
if method == 'reshape':
return max_pool_backward_reshape(dout, real_cache)
elif method == 'im2col':
return max_pool_backward_im2col(dout, real_cache)
else:
raise ValueError('Unrecognized method "%s"' % method)
def max_pool_forward_reshape(x, pool_param):
"""
A fast implementation of the forward pass for the max pooling layer that uses
some clever reshaping.
This can only be used for square pooling regions that tile the input.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
assert pool_height == pool_width == stride, 'Invalid pool params'
assert H % pool_height == 0
assert W % pool_height == 0
x_reshaped = x.reshape(N, C, H / pool_height, pool_height,
W / pool_width, pool_width)
out = x_reshaped.max(axis=3).max(axis=4)
cache = (x, x_reshaped, out)
return out, cache
# MASKED: max_pool_backward_reshape function (lines 193-221)
def max_pool_forward_im2col(x, pool_param):
"""
An implementation of the forward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
assert (H - pool_height) % stride == 0, 'Invalid height'
assert (W - pool_width) % stride == 0, 'Invalid width'
out_height = (H - pool_height) / stride + 1
out_width = (W - pool_width) / stride + 1
x_split = x.reshape(N * C, 1, H, W)
x_cols = im2col(x_split, pool_height, pool_width, padding=0, stride=stride)
x_cols_argmax = np.argmax(x_cols, axis=0)
x_cols_max = x_cols[x_cols_argmax, np.arange(x_cols.shape[1])]
out = x_cols_max.reshape(out_height, out_width, N, C).transpose(2, 3, 0, 1)
cache = (x, x_cols, x_cols_argmax, pool_param)
return out, cache
def max_pool_backward_im2col(dout, cache):
"""
An implementation of the backward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible.
"""
x, x_cols, x_cols_argmax, pool_param = cache
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
dout_reshaped = dout.transpose(2, 3, 0, 1).flatten()
dx_cols = np.zeros_like(x_cols)
dx_cols[x_cols_argmax, np.arange(dx_cols.shape[1])] = dout_reshaped
dx = col2im_indices(dx_cols, (N * C, 1, H, W), pool_height, pool_width,
padding=0, stride=stride)
dx = dx.reshape(x.shape)
return dx
|
def max_pool_backward_reshape(dout, cache):
"""
A fast implementation of the backward pass for the max pooling layer that
uses some clever broadcasting and reshaping.
This can only be used if the forward pass was computed using
max_pool_forward_reshape.
NOTE: If there are multiple argmaxes, this method will assign gradient to
ALL argmax elements of the input rather than picking one. In this case the
gradient will actually be incorrect. However this is unlikely to occur in
practice, so it shouldn't matter much. One possible solution is to split the
upstream gradient equally among all argmax elements; this should result in a
valid subgradient. You can make this happen by uncommenting the line below;
however this results in a significant performance penalty (about 40% slower)
and is unlikely to matter in practice so we don't do it.
"""
x, x_reshaped, out = cache
dx_reshaped = np.zeros_like(x_reshaped)
out_newaxis = out[:, :, :, np.newaxis, :, np.newaxis]
mask = (x_reshaped == out_newaxis)
dout_newaxis = dout[:, :, :, np.newaxis, :, np.newaxis]
dout_broadcast, _ = np.broadcast_arrays(dout_newaxis, dx_reshaped)
dx_reshaped[mask] = dout_broadcast[mask]
dx_reshaped /= np.sum(mask, axis=(3, 5), keepdims=True)
dx = dx_reshaped.reshape(x.shape)
return dx
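# Standalone check (my own sketch) of the tie-splitting that the final division above
# performs: with equal values in a pooling window the mask has several True entries,
# and dividing by their count spreads the upstream gradient evenly.
_window = np.array([[1.0, 1.0], [1.0, 1.0]])
_mask = (_window == _window.max())
_grad = np.where(_mask, 4.0, 0.0) / _mask.sum()  # upstream gradient of 4.0 for this window
# _grad == [[1., 1.], [1., 1.]]: each tied element receives an equal share.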
| 193 | 221 |
import numpy as np
try:
from cs231n.im2col_cython import col2im_cython, im2col_cython
from cs231n.im2col_cython import col2im_6d_cython
except ImportError:
print ('run the following from the cs231n directory and try again:')
print ('python setup.py build_ext --inplace')
print ('You may also need to restart your iPython kernel')
from cs231n.im2col import *
def conv_forward_im2col(x, w, b, conv_param):
"""
A fast implementation of the forward pass for a convolutional layer
based on im2col and col2im.
"""
N, C, H, W = x.shape
num_filters, _, filter_height, filter_width = w.shape
stride, pad = conv_param['stride'], conv_param['pad']
# Check dimensions
assert (W + 2 * pad - filter_width) % stride == 0, 'width does not work'
assert (H + 2 * pad - filter_height) % stride == 0, 'height does not work'
# Create output
out_height = (H + 2 * pad - filter_height) / stride + 1
out_width = (W + 2 * pad - filter_width) / stride + 1
out = np.zeros((N, num_filters, out_height, out_width), dtype=x.dtype)
# x_cols = im2col_indices(x, w.shape[2], w.shape[3], pad, stride)
x_cols = im2col_cython(x, w.shape[2], w.shape[3], pad, stride)
res = w.reshape((w.shape[0], -1)).dot(x_cols) + b.reshape(-1, 1)
out = res.reshape(w.shape[0], out.shape[2], out.shape[3], x.shape[0])
out = out.transpose(3, 0, 1, 2)
cache = (x, w, b, conv_param, x_cols)
return out, cache
def conv_forward_strides(x, w, b, conv_param):
N, C, H, W = x.shape
F, _, HH, WW = w.shape
stride, pad = conv_param['stride'], conv_param['pad']
# Check dimensions
assert (W + 2 * pad - WW) % stride == 0, 'width does not work'
assert (H + 2 * pad - HH) % stride == 0, 'height does not work'
# Pad the input
p = pad
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
# Figure out output dimensions
H += 2 * pad
W += 2 * pad
out_h = (H - HH) / stride + 1
out_w = (W - WW) / stride + 1
# Perform an im2col operation by picking clever strides
shape = (C, HH, WW, N, out_h, out_w)
strides = (H * W, W, 1, C * H * W, stride * W, stride)
strides = x.itemsize * np.array(strides)
x_stride = np.lib.stride_tricks.as_strided(x_padded,
shape=shape, strides=strides)
x_cols = np.ascontiguousarray(x_stride)
x_cols.shape = (C * HH * WW, N * out_h * out_w)
# Now all our convolutions are a big matrix multiply
res = w.reshape(F, -1).dot(x_cols) + b.reshape(-1, 1)
# Reshape the output
res.shape = (F, N, out_h, out_w)
out = res.transpose(1, 0, 2, 3)
# Be nice and return a contiguous array
# The old version of conv_forward_fast doesn't do this, so for a fair
# comparison we won't either
out = np.ascontiguousarray(out)
cache = (x, w, b, conv_param, x_cols)
return out, cache
def conv_backward_strides(dout, cache):
x, w, b, conv_param, x_cols = cache
stride, pad = conv_param['stride'], conv_param['pad']
N, C, H, W = x.shape
F, _, HH, WW = w.shape
_, _, out_h, out_w = dout.shape
db = np.sum(dout, axis=(0, 2, 3))
dout_reshaped = dout.transpose(1, 0, 2, 3).reshape(F, -1)
dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)
dx_cols = w.reshape(F, -1).T.dot(dout_reshaped)
dx_cols.shape = (C, HH, WW, N, out_h, out_w)
dx = col2im_6d_cython(dx_cols, N, C, H, W, HH, WW, pad, stride)
return dx, dw, db
def conv_backward_im2col(dout, cache):
"""
A fast implementation of the backward pass for a convolutional layer
based on im2col and col2im.
"""
x, w, b, conv_param, x_cols = cache
stride, pad = conv_param['stride'], conv_param['pad']
db = np.sum(dout, axis=(0, 2, 3))
num_filters, _, filter_height, filter_width = w.shape
dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(num_filters, -1)
dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)
dx_cols = w.reshape(num_filters, -1).T.dot(dout_reshaped)
# dx = col2im_indices(dx_cols, x.shape, filter_height, filter_width, pad, stride)
dx = col2im_cython(dx_cols, x.shape[0], x.shape[1], x.shape[2], x.shape[3],
filter_height, filter_width, pad, stride)
return dx, dw, db
conv_forward_fast = conv_forward_strides
conv_backward_fast = conv_backward_strides
def max_pool_forward_fast(x, pool_param):
"""
A fast implementation of the forward pass for a max pooling layer.
This chooses between the reshape method and the im2col method. If the pooling
regions are square and tile the input image, then we can use the reshape
method which is very fast. Otherwise we fall back on the im2col method, which
is not much faster than the naive method.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
same_size = pool_height == pool_width == stride
tiles = H % pool_height == 0 and W % pool_width == 0
if same_size and tiles:
out, reshape_cache = max_pool_forward_reshape(x, pool_param)
cache = ('reshape', reshape_cache)
else:
out, im2col_cache = max_pool_forward_im2col(x, pool_param)
cache = ('im2col', im2col_cache)
return out, cache
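# --- Illustrative sketch (not part of the original file; the _demo_* names are
# made up) --- A hedged example of the dispatch rule described in the docstring:
# 2x2 pooling with stride 2 on a 4x4 input is square and tiles the image, so the
# fast reshape path would be chosen; 3x3 pooling with stride 2 on the same input
# would fall back to im2col.
_demo_pool = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
_demo_same_size = _demo_pool['pool_height'] == _demo_pool['pool_width'] == _demo_pool['stride']
_demo_tiles = (4 % _demo_pool['pool_height'] == 0) and (4 % _demo_pool['pool_width'] == 0)
# _demo_same_size and _demo_tiles are both True -> the 'reshape' branch is used.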
def max_pool_backward_fast(dout, cache):
"""
A fast implementation of the backward pass for a max pooling layer.
This switches between the reshape method and the im2col method depending on
which method was used to generate the cache.
"""
method, real_cache = cache
if method == 'reshape':
return max_pool_backward_reshape(dout, real_cache)
elif method == 'im2col':
return max_pool_backward_im2col(dout, real_cache)
else:
raise ValueError('Unrecognized method "%s"' % method)
def max_pool_forward_reshape(x, pool_param):
"""
A fast implementation of the forward pass for the max pooling layer that uses
some clever reshaping.
This can only be used for square pooling regions that tile the input.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
assert pool_height == pool_width == stride, 'Invalid pool params'
assert H % pool_height == 0
assert W % pool_height == 0
x_reshaped = x.reshape(N, C, H // pool_height, pool_height,
W // pool_width, pool_width)
out = x_reshaped.max(axis=3).max(axis=4)
cache = (x, x_reshaped, out)
return out, cache
def max_pool_backward_reshape(dout, cache):
"""
A fast implementation of the backward pass for the max pooling layer that
uses some clever broadcasting and reshaping.
This can only be used if the forward pass was computed using
max_pool_forward_reshape.
NOTE: If there are multiple argmaxes, this method assigns gradient to ALL
argmax elements of the input rather than picking one. To keep the result a
valid subgradient, the upstream gradient is then split equally among the
tied elements by the division line below. Ties are rare in practice, and the
split carries a significant performance penalty (about 40% slower), so
comment that line out if speed matters more than exact tie handling.
"""
x, x_reshaped, out = cache
dx_reshaped = np.zeros_like(x_reshaped)
out_newaxis = out[:, :, :, np.newaxis, :, np.newaxis]
mask = (x_reshaped == out_newaxis)
dout_newaxis = dout[:, :, :, np.newaxis, :, np.newaxis]
dout_broadcast, _ = np.broadcast_arrays(dout_newaxis, dx_reshaped)
dx_reshaped[mask] = dout_broadcast[mask]
dx_reshaped /= np.sum(mask, axis=(3, 5), keepdims=True)
dx = dx_reshaped.reshape(x.shape)
return dx
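# --- Illustrative sketch (not part of the original file; the _demo_* names are
# made up) --- A hedged numeric check of the tie handling described in the
# docstring above: with a constant input every element of the 2x2 window is an
# argmax, so the upstream gradient of 1.0 is split equally (0.25 each) by the
# division line in max_pool_backward_reshape.
_demo_x = np.ones((1, 1, 2, 2))
_demo_pool = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
_demo_out, _demo_cache = max_pool_forward_reshape(_demo_x, _demo_pool)
_demo_dx = max_pool_backward_reshape(np.ones_like(_demo_out), _demo_cache)
# _demo_dx == array([[[[0.25, 0.25], [0.25, 0.25]]]])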
def max_pool_forward_im2col(x, pool_param):
"""
An implementation of the forward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
assert (H - pool_height) % stride == 0, 'Invalid height'
assert (W - pool_width) % stride == 0, 'Invalid width'
out_height = (H - pool_height) // stride + 1
out_width = (W - pool_width) // stride + 1
x_split = x.reshape(N * C, 1, H, W)
x_cols = im2col(x_split, pool_height, pool_width, padding=0, stride=stride)
x_cols_argmax = np.argmax(x_cols, axis=0)
x_cols_max = x_cols[x_cols_argmax, np.arange(x_cols.shape[1])]
out = x_cols_max.reshape(out_height, out_width, N, C).transpose(2, 3, 0, 1)
cache = (x, x_cols, x_cols_argmax, pool_param)
return out, cache
def max_pool_backward_im2col(dout, cache):
"""
An implementation of the backward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible.
"""
x, x_cols, x_cols_argmax, pool_param = cache
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
dout_reshaped = dout.transpose(2, 3, 0, 1).flatten()
dx_cols = np.zeros_like(x_cols)
dx_cols[x_cols_argmax, np.arange(dx_cols.shape[1])] = dout_reshaped
dx = col2im_indices(dx_cols, (N * C, 1, H, W), pool_height, pool_width,
padding=0, stride=stride)
dx = dx.reshape(x.shape)
return dx
|
parse
|
Parse value of redis key on redis for encoded HASH, SET types, or
JSON / Protobuf encoded state-wrapped types and prints it
Args:
key: key on redis
|
#!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import fire
import json
import jsonpickle
import random
import ast
from typing import Any, Union
from magma.common.redis.client import get_default_client
from magma.common.redis.serializers import get_json_deserializer, \
get_proto_deserializer
from magma.mobilityd.serialize_utils import deserialize_ip_block, \
deserialize_ip_desc
from lte.protos.keyval_pb2 import IPDesc
from lte.protos.policydb_pb2 import PolicyRule, InstalledPolicies
from lte.protos.oai.mme_nas_state_pb2 import MmeNasState, UeContext
from lte.protos.oai.spgw_state_pb2 import SpgwState, S11BearerContext
from lte.protos.oai.s1ap_state_pb2 import S1apState, UeDescription
def _deserialize_session_json(serialized_json_str: bytes) -> str:
"""
Helper function to deserialize sessiond:sessions hash list values
:param serialized_json_str
"""
res = _deserialize_generic_json(str(serialized_json_str, 'utf-8', 'ignore'))
dumped = json.dumps(res, indent=2, sort_keys=True)
return dumped
def _deserialize_generic_json(
element: Union[str, dict, list])-> Union[str, dict, list]:
"""
Helper function to deserialize dictionaries or list with nested
json strings
:param element
"""
if isinstance(element, str):
# try to deserialize as json string
try:
element = ast.literal_eval(element)
except:
try:
element = jsonpickle.decode(element)
except:
return element
if isinstance(element, dict):
keys = element.keys()
elif isinstance(element, list):
keys = range(len(element))
else:
# in case it is neither of the known elements, just return as is
return element
for k in keys:
element[k] = _deserialize_generic_json(element[k])
return element
class StateCLI(object):
"""
CLI for debugging current Magma services state and displaying it
in readable manner.
"""
STATE_DESERIALIZERS = {
'assigned_ip_blocks': deserialize_ip_block,
'ip_states': deserialize_ip_desc,
'sessions': _deserialize_session_json,
'rule_names': get_json_deserializer(),
'rule_ids': get_json_deserializer(),
'rule_versions': get_json_deserializer(),
}
STATE_PROTOS = {
'mme_nas_state': MmeNasState,
'spgw_state': SpgwState,
's1ap_state': S1apState,
'mme': UeContext,
'spgw': S11BearerContext,
's1ap': UeDescription,
'mobilityd_ipdesc_record': IPDesc,
'rules': PolicyRule,
'installed': InstalledPolicies,
}
def __init__(self):
self.client = get_default_client()
def keys(self, redis_key: str):
"""
Get current keys on redis db that match the pattern
Args:
redis_key: pattern to match the redis keys
"""
for k in self.client.keys(pattern="{}*".format(redis_key)):
deserialized_key = k.decode('utf-8')
print(deserialized_key)
# MASKED: parse function (lines 117-146)
def corrupt(self, key):
"""
Mostly used for debugging, purposely corrupts state encoded protobuf
in redis, and writes it back to datastore
Args:
key: key on redis
"""
rand_bytes = random.getrandbits(8)
byte_str = bytes([rand_bytes])
self.client[key] = byte_str
print('Corrupted %s in redis' % key)
def _parse_state_json(self, value):
if value:
deserializer = get_json_deserializer()
value = json.loads(jsonpickle.encode(deserializer(value)))
print(json.dumps(value, indent=2, sort_keys=True))
else:
raise AttributeError('Key not found on redis')
def _parse_state_proto(self, key_type, value):
proto = self.STATE_PROTOS.get(key_type.lower())
if proto:
deserializer = get_proto_deserializer(proto)
print(deserializer(value))
else:
raise AttributeError('Key not found on redis')
def _parse_set_type(self, deserializer, key):
set_values = self.client.smembers(key)
for value in set_values:
print(deserializer(value))
def _parse_hash_type(self, deserializer, key):
value = self.client.hgetall(key)
for key, val in value.items():
print(key.decode('utf-8'))
print(deserializer(val))
if __name__ == "__main__":
state_cli = StateCLI()
try:
fire.Fire(state_cli)
except Exception as e:
print('Error: {}'.format(e))
|
def parse(self, key: str):
"""
Parse value of redis key on redis for encoded HASH, SET types, or
JSON / Protobuf encoded state-wrapped types and prints it
Args:
key: key on redis
"""
redis_type = self.client.type(key).decode('utf-8')
key_type = key
if ":" in key:
key_type = key.split(":")[1]
if redis_type == 'hash':
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if not deserializer:
raise AttributeError('Key not found on redis')
self._parse_hash_type(deserializer, key)
elif redis_type == 'set':
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if not deserializer:
raise AttributeError('Key not found on redis')
self._parse_set_type(deserializer, key)
else:
value = self.client.get(key)
# Try parsing as json first, if there's decoding error, parse proto
try:
self._parse_state_json(value)
except UnicodeDecodeError:
self._parse_state_proto(key_type, value)
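# --- Illustrative sketch (not part of the original implementation) --- A
# hedged example of the key handling above: the segment after the first ':'
# (key.split(":")[1]) selects the deserializer from STATE_DESERIALIZERS.
_demo_key = 'sessiond:sessions'
_demo_key_type = _demo_key.split(":")[1]  # 'sessions'
# STATE_DESERIALIZERS['sessions'] is _deserialize_session_json, so a hash
# stored under this key would be decoded entry by entry via _parse_hash_type.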
| 117 | 146 |
#!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import fire
import json
import jsonpickle
import random
import ast
from typing import Any, Union
from magma.common.redis.client import get_default_client
from magma.common.redis.serializers import get_json_deserializer, \
get_proto_deserializer
from magma.mobilityd.serialize_utils import deserialize_ip_block, \
deserialize_ip_desc
from lte.protos.keyval_pb2 import IPDesc
from lte.protos.policydb_pb2 import PolicyRule, InstalledPolicies
from lte.protos.oai.mme_nas_state_pb2 import MmeNasState, UeContext
from lte.protos.oai.spgw_state_pb2 import SpgwState, S11BearerContext
from lte.protos.oai.s1ap_state_pb2 import S1apState, UeDescription
def _deserialize_session_json(serialized_json_str: bytes) -> str:
"""
Helper function to deserialize sessiond:sessions hash list values
:param serialized_json_str
"""
res = _deserialize_generic_json(str(serialized_json_str, 'utf-8', 'ignore'))
dumped = json.dumps(res, indent=2, sort_keys=True)
return dumped
def _deserialize_generic_json(
element: Union[str, dict, list])-> Union[str, dict, list]:
"""
Helper function to deserialize dictionaries or list with nested
json strings
:param element
"""
if isinstance(element, str):
# try to deserialize as json string
try:
element = ast.literal_eval(element)
except:
try:
element = jsonpickle.decode(element)
except:
return element
if isinstance(element, dict):
keys = element.keys()
elif isinstance(element, list):
keys = range(len(element))
else:
# in case it is neither of the known elements, just return as is
return element
for k in keys:
element[k] = _deserialize_generic_json(element[k])
return element
class StateCLI(object):
"""
CLI for debugging current Magma services state and displaying it
in readable manner.
"""
STATE_DESERIALIZERS = {
'assigned_ip_blocks': deserialize_ip_block,
'ip_states': deserialize_ip_desc,
'sessions': _deserialize_session_json,
'rule_names': get_json_deserializer(),
'rule_ids': get_json_deserializer(),
'rule_versions': get_json_deserializer(),
}
STATE_PROTOS = {
'mme_nas_state': MmeNasState,
'spgw_state': SpgwState,
's1ap_state': S1apState,
'mme': UeContext,
'spgw': S11BearerContext,
's1ap': UeDescription,
'mobilityd_ipdesc_record': IPDesc,
'rules': PolicyRule,
'installed': InstalledPolicies,
}
def __init__(self):
self.client = get_default_client()
def keys(self, redis_key: str):
"""
Get current keys on redis db that match the pattern
Args:
redis_key: pattern to match the redis keys
"""
for k in self.client.keys(pattern="{}*".format(redis_key)):
deserialized_key = k.decode('utf-8')
print(deserialized_key)
def parse(self, key: str):
"""
Parse value of redis key on redis for encoded HASH, SET types, or
JSON / Protobuf encoded state-wrapped types and prints it
Args:
key: key on redis
"""
redis_type = self.client.type(key).decode('utf-8')
key_type = key
if ":" in key:
key_type = key.split(":")[1]
if redis_type == 'hash':
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if not deserializer:
raise AttributeError('Key not found on redis')
self._parse_hash_type(deserializer, key)
elif redis_type == 'set':
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if not deserializer:
raise AttributeError('Key not found on redis')
self._parse_set_type(deserializer, key)
else:
value = self.client.get(key)
# Try parsing as json first, if there's decoding error, parse proto
try:
self._parse_state_json(value)
except UnicodeDecodeError:
self._parse_state_proto(key_type, value)
def corrupt(self, key):
"""
Mostly used for debugging, purposely corrupts state encoded protobuf
in redis, and writes it back to datastore
Args:
key: key on redis
"""
rand_bytes = random.getrandbits(8)
byte_str = bytes([rand_bytes])
self.client[key] = byte_str
print('Corrupted %s in redis' % key)
def _parse_state_json(self, value):
if value:
deserializer = get_json_deserializer()
value = json.loads(jsonpickle.encode(deserializer(value)))
print(json.dumps(value, indent=2, sort_keys=True))
else:
raise AttributeError('Key not found on redis')
def _parse_state_proto(self, key_type, value):
proto = self.STATE_PROTOS.get(key_type.lower())
if proto:
deserializer = get_proto_deserializer(proto)
print(deserializer(value))
else:
raise AttributeError('Key not found on redis')
def _parse_set_type(self, deserializer, key):
set_values = self.client.smembers(key)
for value in set_values:
print(deserializer(value))
def _parse_hash_type(self, deserializer, key):
value = self.client.hgetall(key)
for key, val in value.items():
print(key.decode('utf-8'))
print(deserializer(val))
if __name__ == "__main__":
state_cli = StateCLI()
try:
fire.Fire(state_cli)
except Exception as e:
print('Error: {}'.format(e))
|
corrupt
|
Mostly used for debugging, purposely corrupts state encoded protobuf
in redis, and writes it back to datastore
Args:
key: key on redis
|
#!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import fire
import json
import jsonpickle
import random
import ast
from typing import Any, Union
from magma.common.redis.client import get_default_client
from magma.common.redis.serializers import get_json_deserializer, \
get_proto_deserializer
from magma.mobilityd.serialize_utils import deserialize_ip_block, \
deserialize_ip_desc
from lte.protos.keyval_pb2 import IPDesc
from lte.protos.policydb_pb2 import PolicyRule, InstalledPolicies
from lte.protos.oai.mme_nas_state_pb2 import MmeNasState, UeContext
from lte.protos.oai.spgw_state_pb2 import SpgwState, S11BearerContext
from lte.protos.oai.s1ap_state_pb2 import S1apState, UeDescription
def _deserialize_session_json(serialized_json_str: bytes) -> str:
"""
Helper function to deserialize sessiond:sessions hash list values
:param serialized_json_str
"""
res = _deserialize_generic_json(str(serialized_json_str, 'utf-8', 'ignore'))
dumped = json.dumps(res, indent=2, sort_keys=True)
return dumped
def _deserialize_generic_json(
element: Union[str, dict, list])-> Union[str, dict, list]:
"""
Helper function to deserialize dictionaries or list with nested
json strings
:param element
"""
if isinstance(element, str):
# try to deserialize as json string
try:
element = ast.literal_eval(element)
except:
try:
element = jsonpickle.decode(element)
except:
return element
if isinstance(element, dict):
keys = element.keys()
elif isinstance(element, list):
keys = range(len(element))
else:
# in case it is neither of the known elements, just return as is
return element
for k in keys:
element[k] = _deserialize_generic_json(element[k])
return element
class StateCLI(object):
"""
CLI for debugging current Magma services state and displaying it
in readable manner.
"""
STATE_DESERIALIZERS = {
'assigned_ip_blocks': deserialize_ip_block,
'ip_states': deserialize_ip_desc,
'sessions': _deserialize_session_json,
'rule_names': get_json_deserializer(),
'rule_ids': get_json_deserializer(),
'rule_versions': get_json_deserializer(),
}
STATE_PROTOS = {
'mme_nas_state': MmeNasState,
'spgw_state': SpgwState,
's1ap_state': S1apState,
'mme': UeContext,
'spgw': S11BearerContext,
's1ap': UeDescription,
'mobilityd_ipdesc_record': IPDesc,
'rules': PolicyRule,
'installed': InstalledPolicies,
}
def __init__(self):
self.client = get_default_client()
def keys(self, redis_key: str):
"""
Get current keys on redis db that match the pattern
Args:
redis_key: pattern to match the redis keys
"""
for k in self.client.keys(pattern="{}*".format(redis_key)):
deserialized_key = k.decode('utf-8')
print(deserialized_key)
def parse(self, key: str):
"""
Parse value of redis key on redis for encoded HASH, SET types, or
JSON / Protobuf encoded state-wrapped types and prints it
Args:
key: key on redis
"""
redis_type = self.client.type(key).decode('utf-8')
key_type = key
if ":" in key:
key_type = key.split(":")[1]
if redis_type == 'hash':
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if not deserializer:
raise AttributeError('Key not found on redis')
self._parse_hash_type(deserializer, key)
elif redis_type == 'set':
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if not deserializer:
raise AttributeError('Key not found on redis')
self._parse_set_type(deserializer, key)
else:
value = self.client.get(key)
# Try parsing as json first, if there's decoding error, parse proto
try:
self._parse_state_json(value)
except UnicodeDecodeError:
self._parse_state_proto(key_type, value)
# MASKED: corrupt function (lines 148-160)
def _parse_state_json(self, value):
if value:
deserializer = get_json_deserializer()
value = json.loads(jsonpickle.encode(deserializer(value)))
print(json.dumps(value, indent=2, sort_keys=True))
else:
raise AttributeError('Key not found on redis')
def _parse_state_proto(self, key_type, value):
proto = self.STATE_PROTOS.get(key_type.lower())
if proto:
deserializer = get_proto_deserializer(proto)
print(deserializer(value))
else:
raise AttributeError('Key not found on redis')
def _parse_set_type(self, deserializer, key):
set_values = self.client.smembers(key)
for value in set_values:
print(deserializer(value))
def _parse_hash_type(self, deserializer, key):
value = self.client.hgetall(key)
for key, val in value.items():
print(key.decode('utf-8'))
print(deserializer(val))
if __name__ == "__main__":
state_cli = StateCLI()
try:
fire.Fire(state_cli)
except Exception as e:
print('Error: {}'.format(e))
|
def corrupt(self, key):
"""
Mostly used for debugging, purposely corrupts state encoded protobuf
in redis, and writes it back to datastore
Args:
key: key on redis
"""
rand_bytes = random.getrandbits(8)
byte_str = bytes([rand_bytes])
self.client[key] = byte_str
print('Corrupted %s in redis' % key)
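# --- Illustrative note (not part of the original implementation) --- A hedged
# sketch of what the corruption above writes: random.getrandbits(8) yields an
# int in [0, 255] and bytes([n]) wraps it as a single byte, e.g. b'\x9f', so
# the stored value will no longer parse as the expected protobuf message.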
| 148 | 160 |
#!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import fire
import json
import jsonpickle
import random
import ast
from typing import Any, Union
from magma.common.redis.client import get_default_client
from magma.common.redis.serializers import get_json_deserializer, \
get_proto_deserializer
from magma.mobilityd.serialize_utils import deserialize_ip_block, \
deserialize_ip_desc
from lte.protos.keyval_pb2 import IPDesc
from lte.protos.policydb_pb2 import PolicyRule, InstalledPolicies
from lte.protos.oai.mme_nas_state_pb2 import MmeNasState, UeContext
from lte.protos.oai.spgw_state_pb2 import SpgwState, S11BearerContext
from lte.protos.oai.s1ap_state_pb2 import S1apState, UeDescription
def _deserialize_session_json(serialized_json_str: bytes) -> str:
"""
Helper function to deserialize sessiond:sessions hash list values
:param serialized_json_str
"""
res = _deserialize_generic_json(str(serialized_json_str, 'utf-8', 'ignore'))
dumped = json.dumps(res, indent=2, sort_keys=True)
return dumped
def _deserialize_generic_json(
element: Union[str, dict, list])-> Union[str, dict, list]:
"""
Helper function to deserialize dictionaries or list with nested
json strings
:param element
"""
if isinstance(element, str):
# try to deserialize as json string
try:
element = ast.literal_eval(element)
except:
try:
element = jsonpickle.decode(element)
except:
return element
if isinstance(element, dict):
keys = element.keys()
elif isinstance(element, list):
keys = range(len(element))
else:
# in case it is neither of the known elements, just return as is
return element
for k in keys:
element[k] = _deserialize_generic_json(element[k])
return element
class StateCLI(object):
"""
CLI for debugging current Magma services state and displaying it
in readable manner.
"""
STATE_DESERIALIZERS = {
'assigned_ip_blocks': deserialize_ip_block,
'ip_states': deserialize_ip_desc,
'sessions': _deserialize_session_json,
'rule_names': get_json_deserializer(),
'rule_ids': get_json_deserializer(),
'rule_versions': get_json_deserializer(),
}
STATE_PROTOS = {
'mme_nas_state': MmeNasState,
'spgw_state': SpgwState,
's1ap_state': S1apState,
'mme': UeContext,
'spgw': S11BearerContext,
's1ap': UeDescription,
'mobilityd_ipdesc_record': IPDesc,
'rules': PolicyRule,
'installed': InstalledPolicies,
}
def __init__(self):
self.client = get_default_client()
def keys(self, redis_key: str):
"""
Get current keys on redis db that match the pattern
Args:
redis_key: pattern to match the redis keys
"""
for k in self.client.keys(pattern="{}*".format(redis_key)):
deserialized_key = k.decode('utf-8')
print(deserialized_key)
def parse(self, key: str):
"""
Parse value of redis key on redis for encoded HASH, SET types, or
JSON / Protobuf encoded state-wrapped types and prints it
Args:
key: key on redis
"""
redis_type = self.client.type(key).decode('utf-8')
key_type = key
if ":" in key:
key_type = key.split(":")[1]
if redis_type == 'hash':
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if not deserializer:
raise AttributeError('Key not found on redis')
self._parse_hash_type(deserializer, key)
elif redis_type == 'set':
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if not deserializer:
raise AttributeError('Key not found on redis')
self._parse_set_type(deserializer, key)
else:
value = self.client.get(key)
# Try parsing as json first, if there's decoding error, parse proto
try:
self._parse_state_json(value)
except UnicodeDecodeError:
self._parse_state_proto(key_type, value)
def corrupt(self, key):
"""
Mostly used for debugging, purposely corrupts state encoded protobuf
in redis, and writes it back to datastore
Args:
key: key on redis
"""
rand_bytes = random.getrandbits(8)
byte_str = bytes([rand_bytes])
self.client[key] = byte_str
print('Corrupted %s in redis' % key)
def _parse_state_json(self, value):
if value:
deserializer = get_json_deserializer()
value = json.loads(jsonpickle.encode(deserializer(value)))
print(json.dumps(value, indent=2, sort_keys=True))
else:
raise AttributeError('Key not found on redis')
def _parse_state_proto(self, key_type, value):
proto = self.STATE_PROTOS.get(key_type.lower())
if proto:
deserializer = get_proto_deserializer(proto)
print(deserializer(value))
else:
raise AttributeError('Key not found on redis')
def _parse_set_type(self, deserializer, key):
set_values = self.client.smembers(key)
for value in set_values:
print(deserializer(value))
def _parse_hash_type(self, deserializer, key):
value = self.client.hgetall(key)
for key, val in value.items():
print(key.decode('utf-8'))
print(deserializer(val))
if __name__ == "__main__":
state_cli = StateCLI()
try:
fire.Fire(state_cli)
except Exception as e:
print('Error: {}'.format(e))
|
is_since
|
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
|
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
# MASKED: is_since function (lines 122-150)
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
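# --- Illustrative sketch (not part of the original file) --- Hedged examples
# of is_before; when the day is not given it defaults to the last day of the
# month, so a bare year is compared as December 31 of that year.
assert is_before(2019, 2020)             # Dec 31 2019 precedes Dec 31 2020
assert is_before(2020, 2020, m=6, bm=6)  # the same date counts as "before"
assert not is_before(2021, 2020)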
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
def has_finished(ey, em=None, ed=None):
"""
true if today is on or after the dates given, inclusive (i.e. the end date has passed)
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is on or after the dates given
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is between the dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
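# --- Illustrative sketch (not part of the original file) --- A hedged example
# tying the three helpers together with made-up dates, assuming "today" falls
# between them: an appointment running Jan 1 2020 through Dec 31 2030 has
# started (its start date is in the past), has not finished (its end date is
# still in the future), and is therefore current.
# has_started(2020)      -> True
# has_finished(2030)     -> False
# is_current(2020, 2030) -> True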
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
The projects to filter
authors : set of list of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
If True add the grant amount to that person's total amount
reverse : bool, optional
If True reverse the order, defaults to True
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
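# --- Illustrative sketch (not part of the original file; the people below are
# made up) --- A hedged, runnable version of the docstring example: the lookup
# matches against '_id', 'aka' or 'name'.
_demo_people = [
{"_id": "asmith", "name": "Alex Smith", "aka": ["A. Smith"]},
{"_id": "jdoe", "name": "Jane Doe", "aka": []},
]
_demo_hit = fuzzy_retrieval(_demo_people, ["_id", "aka", "name"], "A. Smith")
# _demo_hit["_id"] == "asmith" (matched through the 'aka' list)
_demo_hit2 = fuzzy_retrieval(_demo_people, ["_id", "aka", "name"], "jane doe", case_sensitive=False)
# _demo_hit2["_id"] == "jdoe" (case-insensitive match on 'name')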
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
Parameters
----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
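# --- Illustrative sketch (not part of the original file) --- Hedged checks of
# the suffix rule implemented above.
assert number_suffix(1) == "st" and number_suffix(2) == "nd" and number_suffix(3) == "rd"
assert number_suffix(11) == "th"  # the teens are special-cased
assert number_suffix(21) == "st"
assert number_suffix("3") == ""   # non-numbers give an empty string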
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
a the inferior collection (will lose values of shared keys)
b the superior collection (will keep values of shared keys)
target_id str the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
intercept. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
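# --- Illustrative sketch (not part of the original file; the collections are
# made up) --- A hedged example of the dereferencing above: the grant points
# back at the proposal through "proposal_id", so the two entries are chained;
# per the docstring, values from the superior collection (b) win for shared keys.
_demo_proposals = [{"_id": "prop1", "title": "old title", "amount": 100}]
_demo_grants = [{"_id": "grant1", "proposal_id": "prop1", "title": "new title"}]
_demo_merged = merge_collections(_demo_proposals, _demo_grants, "proposal_id")
# _demo_merged[0]["title"] == "new title"  (shared key, taken from the grant)
# _demo_merged[0]["amount"] == 100         (only present in the proposal)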
def update_schemas(default_schema, user_schema):
"""
Merging the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
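# --- Illustrative sketch (not part of the original file; the schemas are made
# up) --- A hedged example of the recursive merge above: nested dicts are
# merged key by key, the user value wins on conflicts, and the inputs are left
# untouched.
_demo_default = {"grants": {"amount": {"required": True}, "pi": {"type": "string"}}}
_demo_user = {"grants": {"amount": {"required": False}}, "proposals": {}}
_demo_merged_schema = update_schemas(_demo_default, _demo_user)
# _demo_merged_schema == {"grants": {"amount": {"required": False},
#                                    "pi": {"type": "string"}},
#                         "proposals": {}}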
def is_fully_loaded(appts):
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
Group the document in the database according to the value of the doc[by] in db.
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
A dictionary mapping the feature value of group to the list of docs. All docs in the same generator have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
>>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
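# --- Illustrative sketch (not part of the original file) --- A hedged,
# runnable version of the docstring example.
_demo_db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
_demo_grouped = group(_demo_db, "k")
# _demo_grouped == {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}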
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
grp: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
|
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
| 122 | 150 |
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
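# --- Illustrative sketch (not part of the original file) --- Hedged examples
# of is_since with concrete dates.
assert is_since(2020, 2019)             # Jan 1 2020 is after Jan 1 2019
assert is_since(2019, 2019, m=6, sm=6)  # the same date counts as "since"
assert not is_since(2018, 2019)         # Jan 1 2018 precedes Jan 1 2019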
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
def has_finished(ey, em=None, ed=None):
"""
true if today is on or after the dates given, inclusive (i.e. the end date has passed)
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is on or after the dates given
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is between the dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
The projects to filter
authors : set of list of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
        If True only keep grants on which the person is a PI and add those
        amounts to the returned total, defaults to True
    reverse : bool, optional
        If True reverse the order, defaults to True
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
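# Quick illustration (added, not part of the original source): non-URL strings
# simply get the special characters escaped, e.g.
# >>> latex_safe("R&D_budget #3")
# 'R\\&D\\_budget \\#3'
# while a string containing http(s)://... is wrapped in \url{} instead.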
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
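# Minimal sketch (invented data): the address can be nested, e.g.
# >>> docs = [{"x": {"y": 1}}, {"x": {"y": 2}}]
# >>> document_by_value(iter(docs), ("x", "y"), 2)
# {'x': {'y': 2}}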
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
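# Concrete sketch of the docstring example above (hypothetical entry):
# >>> people = [{"_id": "abell", "aka": ["A. Bell"], "name": "Alexandra Bell"}]
# >>> fuzzy_retrieval(people, ["aka", "name"], "a. bell", case_sensitive=False)
# returns that single entry, because the lower-cased alias matches the value.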
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
    Parameters
    ----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
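# Spot checks (added for illustration):
# >>> [str(n) + number_suffix(n) for n in (1, 2, 3, 4, 11, 13, 22)]
# ['1st', '2nd', '3rd', '4th', '11th', '13th', '22nd']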
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
    a : list of dict
        the inferior collection (will lose values of shared keys)
    b : list of dict
        the superior collection (will keep values of shared keys)
    target_id : str
        the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
    intersection. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
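# Toy example (made-up records): per the docstring, values from b win for keys
# present in both collections once a and b are linked through ``target_id``.
# >>> a = [{"_id": "prop1", "amount": 100, "title": "proposal title"}]
# >>> b = [{"_id": "grant1", "proposal_id": "prop1", "amount": 250}]
# >>> merged = merge_collections(a, b, "proposal_id")
# merged is a one-element list whose "amount" resolves to 250 and whose
# "title" still resolves to "proposal title".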
def update_schemas(default_schema, user_schema):
"""
    Merge the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
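# Worked example (added): nested dicts are merged key by key, while scalars in
# the user schema simply override the defaults.
# >>> update_schemas({"a": {"x": 1, "y": 2}}, {"a": {"y": 3}, "b": 4})
# {'a': {'x': 1, 'y': 3}, 'b': 4}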
def is_fully_loaded(appts):
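    """Check whether a list of appointments forms a full (1.0) loading.

    Returns True if the summed loading is exactly 1.0 for every day spanned by
    the appointments, otherwise prints the worst over- or under-loaded day and
    returns False.
    """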
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
Group the document in the database according to the value of the doc[by] in db.
Parameters
----------
db : iterable
The database of documents.
    by : str
The key to group the documents.
Returns
-------
grouped: dict
        A dictionary mapping the feature value of group to the list of docs. All docs in the same group have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
    >>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
    grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
      assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
|
is_before
|
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
|
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
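# Doctest-style illustration (added): a missing month/day defaults to January
# 1st, so bare-year comparisons behave as expected.
# >>> is_since(2020, 2019)
# True
# >>> is_since(2018, 2019)
# False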
# MASKED: is_before function (lines 153-185)
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
        the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
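# Illustration (added): the tested date defaults to January 1st of ``y`` while
# the before date defaults to the last day of ``bm``/``by``.
# >>> is_between(2020, 2019, 2021)
# True
# >>> is_between(2022, 2019, 2021)
# False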
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
def has_finished(ey, em=None, ed=None):
"""
    true if today is on or after the end date given, inclusive (i.e. the end date has passed)
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
        default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
        true if the end date given has already passed
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
        true if today is between the dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
        The projects to be filtered
    authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
        If True only keep grants on which the person is a PI and add those
        amounts to the returned total, defaults to True
    reverse : bool, optional
        If True reverse the order, defaults to True
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
    Parameters
    ----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
    a : list of dict
        the inferior collection (will lose values of shared keys)
    b : list of dict
        the superior collection (will keep values of shared keys)
    target_id : str
        the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
    intersection. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
def update_schemas(default_schema, user_schema):
"""
    Merge the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
def is_fully_loaded(appts):
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
Group the document in the database according to the value of the doc[by] in db.
Parameters
----------
db : iterable
The database of documents.
    by : str
The key to group the documents.
Returns
-------
grouped: dict
        A dictionary mapping the feature value of group to the list of docs. All docs in the same group have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
    >>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
    grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
      assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
|
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
| 153 | 185 |
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
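# Added sanity checks (not in the original file): with only years supplied,
# both dates fall on 31 December, so
# >>> is_before(2019, 2020)
# True
# >>> is_before(2020, 2020)
# True
# >>> is_before(2021, 2020)
# False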
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
        the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
def has_finished(ey, em=None, ed=None):
"""
    true if today is on or after the end date given, inclusive (i.e. the end date has passed)
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
        default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
        true if the end date given has already passed
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
        true if today is between the dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
        The projects to be filtered
    authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
        If True only keep grants on which the person is a PI and add those
        amounts to the returned total, defaults to True
    reverse : bool, optional
        If True reverse the order, defaults to True
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
    Parameters
    ----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
    a : list of dict
        the inferior collection (will lose values of shared keys)
    b : list of dict
        the superior collection (will keep values of shared keys)
    target_id : str
        the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
    intersection. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
def update_schemas(default_schema, user_schema):
"""
    Merge the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
def is_fully_loaded(appts):
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
Group the document in the database according to the value of the doc[by] in db.
Parameters
----------
db : iterable
The database of documents.
    by : str
The key to group the documents.
Returns
-------
grouped: dict
        A dictionary mapping the feature value of group to the list of docs. All docs in the same group have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
    >>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
    grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
      assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
|
is_between
|
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
    the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
|
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
# MASKED: is_between function (lines 188-228)
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
def has_finished(ey, em=None, ed=None):
"""
true if the end date given has passed, inclusive (i.e., today is on or after the end date)
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if the end date given has passed
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is between the start and end dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
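# Illustrative behaviour of is_current (added comment, not in the original source; the
# result depends on the date the code is run and holds for any run date from 2019 up to
# the end of 2999):
# >>> is_current(2019, 2999)
# True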
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
The publication citations
authors : set of list of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
If True add the grant amount to that person's total amount
reverse : bool, optional
If True reverse the order, defaults to False
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
Parameters
----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
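# Illustrative behaviour of number_suffix (added comment, not in the original source):
# >>> [str(n) + number_suffix(n) for n in (1, 2, 3, 11, 22)]
# ['1st', '2nd', '3rd', '11th', '22nd']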
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
a the inferior collection (will lose values of shared keys)
b the superior collection (will keep values of shared keys)
target_id str the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
intersection. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
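# Minimal illustrative sketch of merge_collections (added comment with hypothetical
# data, not from the original source; assumes ChainDB lookups return b's value for
# shared keys, as the docstring above describes):
# >>> props = [{"_id": "prop1", "title": "A proposal", "amount": 0}]
# >>> grants = [{"_id": "grant1", "proposal_id": "prop1", "amount": 100}]
# >>> merged = merge_collections(props, grants, "proposal_id")
# >>> merged[0]["amount"], merged[0]["title"]
# (100, 'A proposal')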
def update_schemas(default_schema, user_schema):
"""
Merging the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
def is_fully_loaded(appts):
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
Group the documents in the database according to the value of doc[by].
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
A dictionary mapping each value of doc[by] to the list of docs with that value. All docs in the same
group have the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
>>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
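# Illustrative usage of group_member_ids (added comment with a hypothetical people
# collection, not from the original source):
# >>> ppl = [{"_id": "afriend", "employment": [{"group": "ergs"}]},
# ...        {"_id": "bstranger", "education": [{"group": "other"}]}]
# >>> group_member_ids(ppl, "ergs")
# {'afriend'}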
|
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
the before day. Optional, defaults to the last day of the before month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
| 188 | 228 |
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
the before day. Optional, defaults to the last day of the before month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
def has_finished(ey, em=None, ed=None):
"""
true if the end date given has passed, inclusive (i.e., today is on or after the end date)
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if the end date given has passed
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is between the start and end dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
The publication citations
authors : set of list of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
If True add the grant amount to that person's total amount
reverse : bool, optional
If True reverse the order, defaults to False
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
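# Illustrative behaviour of latex_safe (added comment, not in the original source):
# >>> latex_safe("cost & profit #1")
# 'cost \\& profit \\#1'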
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
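# Concrete illustrative call of fuzzy_retrieval (added comment with hypothetical
# documents, not from the original source):
# >>> docs = [{"_id": "aperson", "aka": ["A. Person"], "name": "Anne Person"}]
# >>> fuzzy_retrieval(docs, ["_id", "aka", "name"], "anne person",
# ...                 case_sensitive=False)["_id"]
# 'aperson'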
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
Parameters
----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
a the inferior collection (will lose values of shared keys)
b the superior collection (will keep values of shared keys)
target_id str the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
intersection. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
def update_schemas(default_schema, user_schema):
"""
Merging the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
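# Illustrative sketch of the recursive merge in update_schemas (added comment with
# hypothetical schemas, not from the original source):
# >>> default = {"person": {"name": {"required": True}, "age": {"required": False}}}
# >>> user = {"person": {"age": {"required": True}}}
# >>> update_schemas(default, user)["person"]["age"]
# {'required': True}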
def is_fully_loaded(appts):
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
Group the documents in the database according to the value of doc[by].
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
A dictionary mapping each value of doc[by] to the list of docs with that value. All docs in the same
group have the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
>>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
|
has_started
|
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
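Examples
--------
Illustrative checks (not part of the original docstring; the result depends on the
date the code is run, shown here for any run after the year 2000):
>>> has_started(2000)
True
>>> has_started(3000)
False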
|
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
the before day. Optional, defaults to the last day of the before month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
# MASKED: has_started function (lines 231-256)
def has_finished(ey, em=None, ed=None):
"""
true if the end date given has passed, inclusive (i.e., today is on or after the end date)
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if the end date given has passed
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
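# Illustrative behaviour of has_finished (added comment, not in the original source;
# the result depends on the date the code is run, shown here for any run after 2000):
# >>> has_finished(2000)
# True
# >>> has_finished(3000)
# False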
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is between the start and end dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
The publication citations
authors : set of list of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
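# Illustrative usage of filter_projects (added comment with hypothetical project
# entries, not from the original source):
# >>> projects = [{"_id": "proj1", "team": [{"name": "A. Person"}]},
# ...             {"_id": "proj2", "team": [{"name": "B. Other"}]}]
# >>> [p["_id"] for p in filter_projects(projects, {"A. Person"})]
# ['proj1']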
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
If True add the grant amount to that person's total amount
reverse : bool, optional
If True reverse the order, defaults to False
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
Parameters
----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
a the inferior collection (will lose values of shared keys)
b the superior collection (will keep values of shared keys)
target_id str the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
    intersection. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
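# Editor's sketch: merging a hypothetical proposals collection into a grants
# collection.  Only "prop1" is dereferenced by a grant's "proposal_id", so the
# result holds a single chained record; "prop2" is dropped (the intersection).
def _example_merge_collections():
    proposals = [{"_id": "prop1", "title": "Working title"},
                 {"_id": "prop2", "title": "Unfunded idea"}]
    grants = [{"_id": "grant1", "proposal_id": "prop1", "amount": 100000}]
    return merge_collections(proposals, grants, "proposal_id")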
def update_schemas(default_schema, user_schema):
"""
    Merge the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
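# Editor's sketch: a user schema overriding one nested default and adding a new
# top-level key (all keys here are made up for illustration).
def _example_update_schemas():
    default = {"people": {"name": {"required": True},
                          "aka": {"required": False}}}
    user = {"people": {"aka": {"required": True}}, "grants": {}}
    # -> {"people": {"name": {"required": True}, "aka": {"required": True}},
    #     "grants": {}}
    return update_schemas(default, user)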
def is_fully_loaded(appts):
    """Check that the given appointments load every covered day at exactly 1.0.

    Prints the offending day and returns False if any day in the spanned period
    is over- or under-loaded, otherwise returns True.
    """
    status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
    Group the documents in the database according to the value of doc[by].
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
        A dictionary mapping the feature value of group to the list of docs. All docs in the same group have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
    >>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
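# Editor's sketch: grouping an in-memory tuple of documents by the "k" field.
def _example_group():
    db = ({"k": "v0", "_id": "a"}, {"k": "v1", "_id": "b"}, {"k": "v0", "_id": "c"})
    # -> {"v0": [docs a and c], "v1": [doc b]}
    return group(db, "k")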
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
    grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
      assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
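# Editor's sketch: a hypothetical people collection in which only "m1" lists the
# tracked group "bg" in an employment period.
def _example_group_member_ids():
    ppl = [
        {"_id": "m1", "employment": [{"group": "bg", "organization": "Columbia"}]},
        {"_id": "m2", "education": [{"organization": "MIT"}]},
    ]
    return group_member_ids(ppl, "bg")  # -> {"m1"}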
|
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
| 231 | 256 |
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
        the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
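# Editor's sketch: the three date predicates above, on fixed hypothetical dates
# (integer months are used so no month-name parsing is involved).
def _example_date_predicates():
    since = is_since(2020, 2019, m=5, sm=6)      # May 2020 vs. since Jun 2019 -> True
    before = is_before(2019, 2020, m=1, bm=2)    # Jan 2019 vs. before Feb 2020 -> True
    between = is_between(2019, 2018, 2020)       # 2019 falls in [2018, 2020]  -> True
    return since, before, between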
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
def has_finished(ey, em=None, ed=None):
"""
true if today is before the dates given, inclusive
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
        default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is before dates given
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
        true if today is between the dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
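# Editor's sketch: the wall-clock checks above, using years far enough in the
# past/future that the expected results hold whenever this module is run.
def _example_current_checks():
    started = has_started(2000)        # Jan 1, 2000 has passed         -> True
    finished = has_finished(2000)      # Dec 31, 2000 has passed        -> True
    current = is_current(2000, 3000)   # today lies within [2000, 3000] -> True
    return started, finished, current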
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
        The projects
    authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
If True add the grant amount to that person's total amount
reverse : bool, optional
        If True reverse the order, defaults to True
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
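# Editor's sketch: escaping LaTeX special characters and wrapping a detected URL
# (the exact strings are hypothetical).
def _example_latex_safe():
    plain = latex_safe("R&D budget_2020 #1")   # -> r"R\&D budget\_2020 \#1"
    # The URL is wrapped in \url{...}; the surrounding text is still escaped.
    linked = latex_safe("see https://example.com/a_b for details")
    return plain, linked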
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
    Parameters
    ----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
    institution data. Note that the replacement is done in place
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
a the inferior collection (will lose values of shared keys)
b the superior collection (will keep values of shared keys)
target_id str the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
    intersection. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
def update_schemas(default_schema, user_schema):
"""
    Merge the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
def is_fully_loaded(appts):
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
    Group the documents in the database according to the value of doc[by].
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
        A dictionary mapping the feature value of group to the list of docs. All docs in the same group have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
    >>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
    grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
      assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
|
has_finished
|
true if today is before the dates given, inclusive
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
        default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is before dates given
|
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
        the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
# MASKED: has_finished function (lines 259-284)
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
        true if today is between the dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
        The projects
    authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
If True add the grant amount to that person's total amount
reverse : bool, optional
        If True reverse the order, defaults to True
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
    Parameters
    ----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
    institution data. Note that the replacement is done in place
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
a the inferior collection (will lose values of shared keys)
b the superior collection (will keep values of shared keys)
target_id str the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
    intersection. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
def update_schemas(default_schema, user_schema):
"""
    Merge the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
def is_fully_loaded(appts):
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
    Group the documents in the database according to the value of doc[by].
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
        A dictionary mapping the feature value of group to the list of docs. All docs in the same group have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
    >>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
    grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
      assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
|
def has_finished(ey, em=None, ed=None):
"""
true if today is before the dates given, inclusive
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
        default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is before dates given
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
| 259 | 284 |
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
        the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
def has_finished(ey, em=None, ed=None):
"""
true if today is before the dates given, inclusive
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
        default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is before dates given
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
        true if today is between the dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
        The projects
    authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
If True add the grant amount to that person's total amount
reverse : bool, optional
If True reverse the order, defaults to False
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
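# Hedged usage sketch with minimal fake data: only "team", "position" and
# "amount" are required by the filtering logic above; the remaining fields are
# assumptions about what ene_date_key needs for sorting real grant entries.
def _example_filter_grants():
    fake = [{
        "_id": "grant1", "amount": 100000.0,
        "begin_year": 2019, "begin_month": 1, "begin_day": 1,
        "end_year": 2021, "end_month": 12, "end_day": 31,
        "team": [{"name": "A. Scientist", "position": "PI"}],
    }]
    kept, total, sub = filter_grants(fake, {"A. Scientist"})
    return kept, total, sub  # total is 100000.0 for this single-PI case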
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
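# Hedged usage sketch: latex_safe escapes common special characters and, when
# url_check is True, wraps anything that looks like a URL in \url{...} (or the
# wrapper passed in). Pure string-in/string-out, so it is safe to demo inline.
def _example_latex_safe():
    assert latex_safe("R&D #1_done") == r"R\&D \#1\_done"
    # the returned string contains "\url{https://example.com/page}"
    return latex_safe("see https://example.com/page for details")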
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
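# Hedged usage sketch with made-up people documents: fuzzy_retrieval matches
# value against any of the listed source fields, optionally ignoring case.
def _example_fuzzy_retrieval():
    people = [
        {"_id": "ascientist", "name": "Anne Scientist", "aka": ["A. Scientist"]},
        {"_id": "bcoder", "name": "Bob Coder", "aka": []},
    ]
    hit = fuzzy_retrieval(people, ["_id", "aka", "name"], "anne scientist",
                          case_sensitive=False)
    return hit["_id"]  # -> "ascientist"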
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
Parameters
----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
a the inferior collection (will lose values of shared keys)
b the superior collection (will keep values of shared keys)
target_id str the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
intercept. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
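# Hedged usage sketch with toy collections: entries of b that point back to an
# entry of a via target_id get chained; per the docstring above, b wins on
# shared keys while keys only present in a remain visible.
def _example_merge_collections():
    proposals = [{"_id": "prop1", "title": "Old title", "amount": 10}]
    grants = [{"_id": "grant1", "proposal_id": "prop1", "title": "New title"}]
    merged = merge_collections(proposals, grants, "proposal_id")
    return merged[0]["title"], merged[0]["amount"]  # -> ("New title", 10)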
def update_schemas(default_schema, user_schema):
"""
Merging the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
def is_fully_loaded(appts):
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
Group the document in the database according to the value of the doc[by] in db.
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
A dictionary mapping each grouping value to the list of docs. All docs in the same list have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
>>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
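# Hedged usage sketch with a minimal fake people collection; everything except
# the "group" key on an employment/education item is an assumption made only
# for illustration.
def _example_group_member_ids():
    ppl = [
        {"_id": "ascientist",
         "employment": [{"organization": "Columbia U", "group": "bg"}]},
        {"_id": "bcoder",
         "education": [{"institution": "MIT"}]},
    ]
    return group_member_ids(ppl, "bg")  # -> {"ascientist"}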
|
group
|
Group the document in the database according to the value of the doc[by] in db.
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
A dictionary mapping each grouping value to the list of docs. All docs in the same list have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
>>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
|
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
def has_finished(ey, em=None, ed=None):
"""
true if today is on or after the end date given, inclusive (i.e. the end date has passed)
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is on or after the end date given
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is between the dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
The project entries
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
If True add the grant amount to that person's total amount
reverse : bool, optional
If True reverse the order, defaults to False
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
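# Hedged usage sketch: document_by_value walks the given address into each
# document and returns the first exact match; the documents are toy data.
def _example_document_by_value():
    docs = iter([
        {"_id": "a", "info": {"status": "draft"}},
        {"_id": "b", "info": {"status": "final"}},
    ])
    hit = document_by_value(docs, ("info", "status"), "final")
    return hit["_id"]  # -> "b"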
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
Parameters
----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
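# Hedged usage sketch: number_suffix gives the ordinal suffix, with the usual
# special-casing for the teens; non-numbers yield an empty string.
def _example_number_suffix():
    assert number_suffix(1) == "st"
    assert number_suffix(12) == "th"
    assert number_suffix(23) == "rd"
    assert number_suffix("three") == ""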
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
a the inferior collection (will lose values of shared keys)
b the superior collection (will keep values of shared keys)
target_id str the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
intercept. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
def update_schemas(default_schema, user_schema):
"""
Merging the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
def is_fully_loaded(appts):
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
# MASKED: group function (lines 801-836)
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
|
def group(db, by):
"""
Group the document in the database according to the value of the doc[by] in db.
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
A dictionary mapping each grouping value to the list of docs. All docs in the same list have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
>>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
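# Hedged usage sketch (illustrative only): grouping a toy collection by "k";
# documents missing the key are reported and left out of the result.
def _example_group():
    db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
    grouped = group(db, "k")
    assert grouped == {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}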
| 801 | 836 |
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
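# Hedged usage sketch: gets() is a generator, so wrap it in list() or set()
# when a concrete container is needed; the team entries are made up.
def _example_gets():
    team = [{"name": "A. Scientist"}, {"name": "B. Coder"}, {"role": "advisor"}]
    assert list(gets(team, "name")) == ["A. Scientist", "B. Coder", None]
    assert set(gets(team, "name", "unknown")) == {
        "A. Scientist", "B. Coder", "unknown"}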
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
def has_finished(ey, em=None, ed=None):
"""
true if today is on or after the end date given, inclusive (i.e. the end date has passed)
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is on or after the end date given
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
true if today is between the dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
The project entries
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
If True add the grant amount to that person's total amount
reverse : bool, optional
If True reverse the order, defaults to False
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
Parameters
----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
a the inferior collection (will lose values of shared keys)
b the superior collection (will keep values of shared keys)
target_id str the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
intercept. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
def update_schemas(default_schema, user_schema):
"""
Merging the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
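# Hedged usage sketch: nested user entries are merged recursively while leaves
# in the user schema override the defaults; neither input is mutated.
def _example_update_schemas():
    default = {"grants": {"amount": {"type": "float"}, "required": True}}
    user = {"grants": {"amount": {"type": "integer"}}}
    merged = update_schemas(default, user)
    assert merged["grants"]["amount"]["type"] == "integer"
    assert merged["grants"]["required"] is True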
def is_fully_loaded(appts):
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
Group the document in the database according to the value of the doc[by] in db.
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
A dictionary mapping each grouping value to the list of docs. All docs in the same list have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
>>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
|
group_member_ids
|
Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
assigned to that group in some period of time and returns the set of their ids.
|
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
        the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
def has_finished(ey, em=None, ed=None):
"""
    true if today is on or after the end date given, inclusive (i.e., the end date has passed)
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
        default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
        true if the end date is today or has already passed
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
        true if today is between the dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
        The projects to filter
    authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
If True add the grant amount to that person's total amount
reverse : bool, optional
        If True reverse the order, defaults to True
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
    Parameters
    ----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
a the inferior collection (will lose values of shared keys)
b the superior collection (will keep values of shared keys)
target_id str the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
    intersection. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
def update_schemas(default_schema, user_schema):
"""
    Merge the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
def is_fully_loaded(appts):
    """Return True if the summed loading of the appointments is exactly 1.0
    on every day between their earliest begin_date and latest end_date;
    otherwise print where the loading is over or under and return False.
    """
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
Group the document in the database according to the value of the doc[by] in db.
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
        A dictionary mapping the feature value of group to the list of docs. All docs in the same group have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
    >>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
# MASKED: group_member_ids function (lines 862-895)
|
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
    grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
      assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
| 862 | 895 |
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
        the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
def has_finished(ey, em=None, ed=None):
"""
    true if today is on or after the end date given, inclusive (i.e., the end date has passed)
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
        default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
        true if the end date is today or has already passed
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
        true if today is between the dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
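# Editor's illustrative sketch, not part of the original module: the pure
# date predicates above checked against fixed dates so the results are
# deterministic (has_started/has_finished/is_current depend on today()).
def _example_date_predicates():
    assert is_since(2020, 2019)              # 1 Jan 2020 is on or after 1 Jan 2019
    assert is_before(2018, 2019, m=6, bm=1)  # Jun 2018 is before Jan 2019
    assert is_between(2019, 2018, 2020)      # 2019 falls within 2018-2020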
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
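# Editor's illustrative sketch, not part of the original module.  The two
# citation entries are invented, and it is assumed here that doc_date_key can
# order documents by their "year"/"month" fields.
def _example_filter_publications():
    citations = [
        {"_id": "paper1", "author": ["A. Author", "B. Coauthor"],
         "year": 2020, "month": 1},
        {"_id": "paper2", "author": ["C. Someone"], "year": 2019, "month": 5},
    ]
    pubs = filter_publications(citations, {"A. Author"})
    # Only "paper1" survives, and with the default bold=True the matching
    # author is wrapped as \textbf{A. Author}.
    return pubs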
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
        The projects to filter
    authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
If True add the grant amount to that person's total amount
reverse : bool, optional
        If True reverse the order, defaults to True
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
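# Editor's illustrative sketch, not part of the original module: latex_safe
# escapes LaTeX special characters and, by default, wraps anything matching
# HTTP_RE in the \url{} command.
def _example_latex_safe():
    assert latex_safe("profit & loss_2020") == r"profit \& loss\_2020"
    wrapped = latex_safe("see https://example.com/page for details")
    # wrapped == r"see \url{https://example.com/page} for details"
    return wrapped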
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
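# Editor's illustrative sketch, not part of the original module: the two
# people documents below are invented for the example.
def _example_fuzzy_retrieval():
    people = [
        {"_id": "jdoe", "name": "Jane Doe", "aka": ["J. Doe", "Jane"]},
        {"_id": "rroe", "name": "Richard Roe", "aka": []},
    ]
    doc = fuzzy_retrieval(people, ["_id", "aka", "name"], "jane doe",
                          case_sensitive=False)
    assert doc["_id"] == "jdoe"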
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
    Parameters
    ----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
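# Editor's illustrative sketch, not part of the original module:
def _example_number_suffix():
    assert number_suffix(1) == "st"
    assert number_suffix(2) == "nd"
    assert number_suffix(11) == "th"  # teens always take "th"
    assert number_suffix(23) == "rd"
    assert number_suffix("3") == ""   # non-numbers give an empty string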
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
a the inferior collection (will lose values of shared keys)
b the superior collection (will keep values of shared keys)
target_id str the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
    intersection. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
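# Editor's illustrative sketch, not part of the original module.  The two
# one-entry collections are invented; dict-style lookup on the returned
# ChainDB items is assumed to behave as the docstring above describes
# (values from the superior collection win for shared keys).
def _example_merge_collections():
    proposals = [{"_id": "prop1", "title": "Old title", "amount": 100000}]
    grants = [{"_id": "grant1", "proposal_id": "prop1", "title": "New title"}]
    merged = merge_collections(proposals, grants, "proposal_id")
    assert len(merged) == 1
    assert merged[0]["title"] == "New title"  # shared key: superior value kept
    assert merged[0]["amount"] == 100000      # key only present in proposals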
def update_schemas(default_schema, user_schema):
"""
    Merge the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
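# Editor's illustrative sketch, not part of the original module: a nested
# user schema overrides one leaf of the default schema, the rest is kept and
# neither input is modified.
def _example_update_schemas():
    default = {"people": {"name": {"required": True},
                          "email": {"required": False}}}
    user = {"people": {"email": {"required": True}}}
    merged = update_schemas(default, user)
    assert merged["people"]["email"]["required"] is True    # overridden
    assert merged["people"]["name"]["required"] is True     # untouched
    assert default["people"]["email"]["required"] is False  # input unchanged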
def is_fully_loaded(appts):
    """Return True if the summed loading of the appointments is exactly 1.0
    on every day between their earliest begin_date and latest end_date;
    otherwise print where the loading is over or under and return False.
    """
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
Group the document in the database according to the value of the doc[by] in db.
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
        A dictionary mapping the feature value of group to the list of docs. All docs in the same group have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
    >>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
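# Editor's illustrative sketch, not part of the original module, mirroring
# the docstring example above.
def _example_group():
    db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
    grouped = group(db, "k")
    assert grouped == {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}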
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
    grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
      assigned to that group in some period of time and returns the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
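# Editor's illustrative sketch, not part of the original module: two invented
# people, one of whom lists the group id "ergs" in an employment entry.
def _example_group_member_ids():
    people = [
        {"_id": "aeinstein",
         "employment": [{"organization": "IAS", "group": "ergs"}]},
        {"_id": "nbohr", "education": [{"institution": "Copenhagen"}]},
    ]
    assert group_member_ids(people, "ergs") == {"aeinstein"}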
|
__init__
|
Monitor qcodes parameters.
Args:
*parameters: Parameters to monitor.
interval: How often one wants to refresh the values.
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 unga <[email protected]>
#
# Distributed under terms of the MIT license.
"""
Monitor a set of parameters in a background thread
stream output over websocket
To start monitor, run this file, or if qcodes is installed as a module:
``% python -m qcodes.monitor.monitor``
Add parameters to monitor in your measurement by creating a new monitor with a
list of parameters to monitor:
``monitor = qcodes.Monitor(param1, param2, param3, ...)``
"""
import sys
import logging
import os
import time
import json
from contextlib import suppress
from typing import Dict, Union, Any, Optional, Sequence, Callable, Awaitable
from collections import defaultdict
import asyncio
from asyncio import CancelledError
from threading import Thread, Event
import socketserver
import webbrowser
import websockets
from qcodes.instrument.parameter import Parameter
if sys.version_info < (3, 7):
all_tasks = asyncio.Task.all_tasks
else:
all_tasks = asyncio.all_tasks
WEBSOCKET_PORT = 5678
SERVER_PORT = 3000
log = logging.getLogger(__name__)
def _get_metadata(*parameters: Parameter) -> Dict[str, Any]:
"""
Return a dictionary that contains the parameter metadata grouped by the
instrument it belongs to.
"""
metadata_timestamp = time.time()
# group metadata by instrument
metas: dict = defaultdict(list)
for parameter in parameters:
# Get the latest value from the parameter,
# respecting the max_val_age parameter
meta: Dict[str, Optional[Union[float, str]]] = {}
meta["value"] = str(parameter.get_latest())
timestamp = parameter.get_latest.get_timestamp()
if timestamp is not None:
meta["ts"] = timestamp.timestamp()
else:
meta["ts"] = None
meta["name"] = parameter.label or parameter.name
meta["unit"] = parameter.unit
# find the base instrument that this parameter belongs to
baseinst = parameter.root_instrument
if baseinst is None:
metas["Unbound Parameter"].append(meta)
else:
metas[str(baseinst)].append(meta)
# Create list of parameters, grouped by instrument
parameters_out = []
for instrument in metas:
temp = {"instrument": instrument, "parameters": metas[instrument]}
parameters_out.append(temp)
state = {"ts": metadata_timestamp, "parameters": parameters_out}
return state
def _handler(parameters: Sequence[Parameter], interval: float) \
-> Callable[[websockets.WebSocketServerProtocol, str], Awaitable[None]]:
"""
Return the websockets server handler.
"""
async def server_func(websocket: websockets.WebSocketServerProtocol, _: str) -> None:
"""
Create a websockets handler that sends parameter values to a listener
every "interval" seconds.
"""
while True:
try:
# Update the parameter values
try:
meta = _get_metadata(*parameters)
except ValueError:
log.exception("Error getting parameters")
break
log.debug("sending.. to %r", websocket)
await websocket.send(json.dumps(meta))
# Wait for interval seconds and then send again
await asyncio.sleep(interval)
except (CancelledError, websockets.exceptions.ConnectionClosed):
log.debug("Got CancelledError or ConnectionClosed",
exc_info=True)
break
log.debug("Closing websockets connection")
return server_func
class Monitor(Thread):
"""
QCodes Monitor - WebSockets server to monitor qcodes parameters.
"""
running = None
# MASKED: __init__ function (lines 128-162)
def run(self) -> None:
"""
Start the event loop and run forever.
"""
log.debug("Running Websocket server")
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
try:
server_start = websockets.serve(self.handler, '127.0.0.1',
WEBSOCKET_PORT, close_timeout=1)
self.server = self.loop.run_until_complete(server_start)
self.server_is_started.set()
self.loop.run_forever()
except OSError:
# The code above may throw an OSError
# if the socket cannot be bound
log.exception("Server could not be started")
finally:
log.debug("loop stopped")
log.debug("Pending tasks at close: %r",
all_tasks(self.loop))
self.loop.close()
log.debug("loop closed")
self.loop_is_closed.set()
def update_all(self) -> None:
"""
Update all parameters in the monitor.
"""
for parameter in self._parameters:
# call get if it can be called without arguments
with suppress(TypeError):
parameter.get()
def stop(self) -> None:
"""
        Shut down the server, close the event loop and join the thread,
        setting the active Monitor to ``None``.
"""
self.join()
Monitor.running = None
async def __stop_server(self) -> None:
log.debug("asking server %r to close", self.server)
if self.server is not None:
self.server.close()
log.debug("waiting for server to close")
if self.loop is not None and self.server is not None:
await self.loop.create_task(self.server.wait_closed())
log.debug("stopping loop")
if self.loop is not None:
log.debug("Pending tasks at stop: %r",
all_tasks(self.loop))
self.loop.stop()
def join(self, timeout: Optional[float] = None) -> None:
"""
Overwrite ``Thread.join`` to make sure server is stopped before
        joining, avoiding a potential deadlock.
"""
log.debug("Shutting down server")
if not self.is_alive():
# we run this check before trying to run to prevent a cryptic
# error message
log.debug("monitor is dead")
return
try:
if self.loop is not None:
asyncio.run_coroutine_threadsafe(self.__stop_server(),
self.loop)
except RuntimeError:
# the above may throw a runtime error if the loop is already
# stopped in which case there is nothing more to do
log.exception("Could not close loop")
self.loop_is_closed.wait(timeout=5)
if not self.loop_is_closed.is_set():
raise RuntimeError("Failed to join loop")
log.debug("Loop reported closed")
super().join(timeout=timeout)
log.debug("Monitor Thread has joined")
@staticmethod
def show() -> None:
"""
Overwrite this method to show/raise your monitor GUI
F.ex.
::
import webbrowser
url = "localhost:3000"
# Open URL in new window, raising the window if possible.
webbrowser.open_new(url)
"""
webbrowser.open("http://localhost:{}".format(SERVER_PORT))
if __name__ == "__main__":
import http.server
# If this file is run, create a simple webserver that serves a simple
# website that can be used to view monitored parameters.
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'dist')
os.chdir(STATIC_DIR)
try:
log.info("Starting HTTP Server at http://localhost:%i", SERVER_PORT)
with socketserver.TCPServer(("", SERVER_PORT),
http.server.SimpleHTTPRequestHandler) as httpd:
log.debug("serving directory %s", STATIC_DIR)
webbrowser.open("http://localhost:{}".format(SERVER_PORT))
httpd.serve_forever()
except KeyboardInterrupt:
log.info("Shutting Down HTTP Server")
|
def __init__(self, *parameters: Parameter, interval: float = 1):
"""
Monitor qcodes parameters.
Args:
*parameters: Parameters to monitor.
interval: How often one wants to refresh the values.
"""
super().__init__()
# Check that all values are valid parameters
for parameter in parameters:
if not isinstance(parameter, Parameter):
raise TypeError(f"We can only monitor QCodes "
f"Parameters, not {type(parameter)}")
self.loop: Optional[asyncio.AbstractEventLoop] = None
self.server: Optional[websockets.WebSocketServer] = None
self._parameters = parameters
self.loop_is_closed = Event()
self.server_is_started = Event()
self.handler = _handler(parameters, interval=interval)
log.debug("Start monitoring thread")
if Monitor.running:
# stop the old server
log.debug("Stopping and restarting server")
Monitor.running.stop()
self.start()
# Wait until the loop is running
self.server_is_started.wait(timeout=5)
if not self.server_is_started.is_set():
raise RuntimeError("Failed to start server")
Monitor.running = self
| 128 | 162 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 unga <[email protected]>
#
# Distributed under terms of the MIT license.
"""
Monitor a set of parameters in a background thread
stream output over websocket
To start monitor, run this file, or if qcodes is installed as a module:
``% python -m qcodes.monitor.monitor``
Add parameters to monitor in your measurement by creating a new monitor with a
list of parameters to monitor:
``monitor = qcodes.Monitor(param1, param2, param3, ...)``
"""
import sys
import logging
import os
import time
import json
from contextlib import suppress
from typing import Dict, Union, Any, Optional, Sequence, Callable, Awaitable
from collections import defaultdict
import asyncio
from asyncio import CancelledError
from threading import Thread, Event
import socketserver
import webbrowser
import websockets
from qcodes.instrument.parameter import Parameter
if sys.version_info < (3, 7):
all_tasks = asyncio.Task.all_tasks
else:
all_tasks = asyncio.all_tasks
WEBSOCKET_PORT = 5678
SERVER_PORT = 3000
log = logging.getLogger(__name__)
def _get_metadata(*parameters: Parameter) -> Dict[str, Any]:
"""
Return a dictionary that contains the parameter metadata grouped by the
instrument it belongs to.
"""
metadata_timestamp = time.time()
# group metadata by instrument
metas: dict = defaultdict(list)
for parameter in parameters:
# Get the latest value from the parameter,
# respecting the max_val_age parameter
meta: Dict[str, Optional[Union[float, str]]] = {}
meta["value"] = str(parameter.get_latest())
timestamp = parameter.get_latest.get_timestamp()
if timestamp is not None:
meta["ts"] = timestamp.timestamp()
else:
meta["ts"] = None
meta["name"] = parameter.label or parameter.name
meta["unit"] = parameter.unit
# find the base instrument that this parameter belongs to
baseinst = parameter.root_instrument
if baseinst is None:
metas["Unbound Parameter"].append(meta)
else:
metas[str(baseinst)].append(meta)
# Create list of parameters, grouped by instrument
parameters_out = []
for instrument in metas:
temp = {"instrument": instrument, "parameters": metas[instrument]}
parameters_out.append(temp)
state = {"ts": metadata_timestamp, "parameters": parameters_out}
return state
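# Editor's illustrative sketch, not part of the original module: what the
# metadata looks like for a single stand-alone parameter.  Creating a cached
# "manual" parameter with set_cmd=None/get_cmd=None is an assumption about
# the qcodes Parameter API made for this example.
def _example_get_metadata() -> Dict[str, Any]:
    temperature = Parameter("temperature", set_cmd=None, get_cmd=None, unit="K")
    temperature.set(295.0)
    state = _get_metadata(temperature)
    # The parameter has no root instrument, so it is grouped under the
    # "Unbound Parameter" entry; "value", "unit" and "ts" come from its cache.
    return state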
def _handler(parameters: Sequence[Parameter], interval: float) \
-> Callable[[websockets.WebSocketServerProtocol, str], Awaitable[None]]:
"""
Return the websockets server handler.
"""
async def server_func(websocket: websockets.WebSocketServerProtocol, _: str) -> None:
"""
Create a websockets handler that sends parameter values to a listener
every "interval" seconds.
"""
while True:
try:
# Update the parameter values
try:
meta = _get_metadata(*parameters)
except ValueError:
log.exception("Error getting parameters")
break
log.debug("sending.. to %r", websocket)
await websocket.send(json.dumps(meta))
# Wait for interval seconds and then send again
await asyncio.sleep(interval)
except (CancelledError, websockets.exceptions.ConnectionClosed):
log.debug("Got CancelledError or ConnectionClosed",
exc_info=True)
break
log.debug("Closing websockets connection")
return server_func
class Monitor(Thread):
"""
QCodes Monitor - WebSockets server to monitor qcodes parameters.
"""
running = None
def __init__(self, *parameters: Parameter, interval: float = 1):
"""
Monitor qcodes parameters.
Args:
*parameters: Parameters to monitor.
interval: How often one wants to refresh the values.
"""
super().__init__()
# Check that all values are valid parameters
for parameter in parameters:
if not isinstance(parameter, Parameter):
raise TypeError(f"We can only monitor QCodes "
f"Parameters, not {type(parameter)}")
self.loop: Optional[asyncio.AbstractEventLoop] = None
self.server: Optional[websockets.WebSocketServer] = None
self._parameters = parameters
self.loop_is_closed = Event()
self.server_is_started = Event()
self.handler = _handler(parameters, interval=interval)
log.debug("Start monitoring thread")
if Monitor.running:
# stop the old server
log.debug("Stopping and restarting server")
Monitor.running.stop()
self.start()
# Wait until the loop is running
self.server_is_started.wait(timeout=5)
if not self.server_is_started.is_set():
raise RuntimeError("Failed to start server")
Monitor.running = self
def run(self) -> None:
"""
Start the event loop and run forever.
"""
log.debug("Running Websocket server")
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
try:
server_start = websockets.serve(self.handler, '127.0.0.1',
WEBSOCKET_PORT, close_timeout=1)
self.server = self.loop.run_until_complete(server_start)
self.server_is_started.set()
self.loop.run_forever()
except OSError:
# The code above may throw an OSError
# if the socket cannot be bound
log.exception("Server could not be started")
finally:
log.debug("loop stopped")
log.debug("Pending tasks at close: %r",
all_tasks(self.loop))
self.loop.close()
log.debug("loop closed")
self.loop_is_closed.set()
def update_all(self) -> None:
"""
Update all parameters in the monitor.
"""
for parameter in self._parameters:
# call get if it can be called without arguments
with suppress(TypeError):
parameter.get()
def stop(self) -> None:
"""
        Shut down the server, close the event loop and join the thread,
        setting the active Monitor to ``None``.
"""
self.join()
Monitor.running = None
async def __stop_server(self) -> None:
log.debug("asking server %r to close", self.server)
if self.server is not None:
self.server.close()
log.debug("waiting for server to close")
if self.loop is not None and self.server is not None:
await self.loop.create_task(self.server.wait_closed())
log.debug("stopping loop")
if self.loop is not None:
log.debug("Pending tasks at stop: %r",
all_tasks(self.loop))
self.loop.stop()
def join(self, timeout: Optional[float] = None) -> None:
"""
Overwrite ``Thread.join`` to make sure server is stopped before
        joining, avoiding a potential deadlock.
"""
log.debug("Shutting down server")
if not self.is_alive():
# we run this check before trying to run to prevent a cryptic
# error message
log.debug("monitor is dead")
return
try:
if self.loop is not None:
asyncio.run_coroutine_threadsafe(self.__stop_server(),
self.loop)
except RuntimeError:
# the above may throw a runtime error if the loop is already
# stopped in which case there is nothing more to do
log.exception("Could not close loop")
self.loop_is_closed.wait(timeout=5)
if not self.loop_is_closed.is_set():
raise RuntimeError("Failed to join loop")
log.debug("Loop reported closed")
super().join(timeout=timeout)
log.debug("Monitor Thread has joined")
@staticmethod
def show() -> None:
"""
Overwrite this method to show/raise your monitor GUI
F.ex.
::
import webbrowser
url = "localhost:3000"
# Open URL in new window, raising the window if possible.
webbrowser.open_new(url)
"""
webbrowser.open("http://localhost:{}".format(SERVER_PORT))
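# Editor's illustrative sketch, not part of the original module: how a
# measurement script would typically start and stop a Monitor.  The two
# "manual" parameters below are invented for the example.
def _example_start_monitor() -> None:
    gate = Parameter("gate_voltage", set_cmd=None, get_cmd=None, unit="V")
    bias = Parameter("bias_current", set_cmd=None, get_cmd=None, unit="A")
    gate.set(0.25)
    bias.set(1e-6)
    monitor = Monitor(gate, bias, interval=0.5)  # starts the websocket thread
    # ... run the measurement, updating the parameters ...
    monitor.stop()  # shuts the server down and joins the thread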
if __name__ == "__main__":
import http.server
# If this file is run, create a simple webserver that serves a simple
# website that can be used to view monitored parameters.
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'dist')
os.chdir(STATIC_DIR)
try:
log.info("Starting HTTP Server at http://localhost:%i", SERVER_PORT)
with socketserver.TCPServer(("", SERVER_PORT),
http.server.SimpleHTTPRequestHandler) as httpd:
log.debug("serving directory %s", STATIC_DIR)
webbrowser.open("http://localhost:{}".format(SERVER_PORT))
httpd.serve_forever()
except KeyboardInterrupt:
log.info("Shutting Down HTTP Server")
|
assertDetailedTraceback
|
Assert that L{printDetailedTraceback} produces and prints a detailed
traceback.
The detailed traceback consists of a header::
*--- Failure #20 ---
The body contains the stacktrace::
/twisted/trial/_synctest.py:1180: _run(...)
/twisted/python/util.py:1076: runWithWarningsSuppressed(...)
--- <exception caught here> ---
/twisted/test/test_failure.py:39: getDivisionFailure(...)
If C{captureVars} is enabled the body also includes a list of
globals and locals::
[ Locals ]
exampleLocalVar : 'xyz'
...
( Globals )
...
Or when C{captureVars} is disabled::
[Capture of Locals and Globals disabled (use captureVars=True)]
When C{cleanFailure} is enabled references to other objects are removed
and replaced with strings.
And finally the footer with the L{Failure}'s value::
exceptions.ZeroDivisionError: float division
*--- End of Failure #20 ---
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
@param cleanFailure: Enables L{Failure.cleanFailure}.
@type cleanFailure: C{bool}
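A minimal sketch of producing this output (an added illustration, not part
of the original docstring; it uses the C{getDivisionFailure} helper defined
in this test module)::
    from io import StringIO
    f = getDivisionFailure(captureVars=True)
    out = StringIO()
    f.printDetailedTraceback(out)
    print(out.getvalue())  # header, stack, locals/globals, footer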
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for the L{twisted.python.failure} module.
"""
from __future__ import division, absolute_import
import re
import sys
import traceback
import pdb
import linecache
from twisted.python.compat import _PY3, NativeStringIO
from twisted.python import reflect
from twisted.python import failure
from twisted.trial.unittest import SkipTest, SynchronousTestCase
try:
from twisted.test import raiser
except ImportError:
raiser = None
def getDivisionFailure(*args, **kwargs):
"""
Make a C{Failure} of a divide-by-zero error.
@param args: Any C{*args} are passed to Failure's constructor.
@param kwargs: Any C{**kwargs} are passed to Failure's constructor.
"""
try:
1/0
except:
f = failure.Failure(*args, **kwargs)
return f
class FailureTests(SynchronousTestCase):
"""
Tests for L{failure.Failure}.
"""
def test_failAndTrap(self):
"""
Trapping a L{Failure}.
"""
try:
raise NotImplementedError('test')
except:
f = failure.Failure()
error = f.trap(SystemExit, RuntimeError)
self.assertEqual(error, RuntimeError)
self.assertEqual(f.type, NotImplementedError)
def test_trapRaisesWrappedException(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises the wrapped
C{Exception}.
"""
if not _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 3.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(ValueError, f.trap, OverflowError)
self.assertIs(exception, untrapped)
def test_trapRaisesSelf(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises itself.
"""
if _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 2.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(failure.Failure, f.trap, OverflowError)
self.assertIs(f, untrapped)
def test_failureValueFromFailure(self):
"""
A L{failure.Failure} constructed from another
L{failure.Failure} instance has its C{value} property set to
the value of that L{failure.Failure} instance.
"""
exception = ValueError()
f1 = failure.Failure(exception)
f2 = failure.Failure(f1)
self.assertIs(f2.value, exception)
def test_failureValueFromFoundFailure(self):
"""
A L{failure.Failure} constructed without a C{exc_value}
argument will search for an "original" C{Failure}, and if
found, its value will be used as the value for the new
C{Failure}.
"""
exception = ValueError()
f1 = failure.Failure(exception)
try:
f1.trap(OverflowError)
except:
f2 = failure.Failure()
self.assertIs(f2.value, exception)
def assertStartsWith(self, s, prefix):
"""
Assert that C{s} starts with a particular C{prefix}.
@param s: The input string.
@type s: C{str}
@param prefix: The string that C{s} should start with.
@type prefix: C{str}
"""
self.assertTrue(s.startswith(prefix),
'%r is not the start of %r' % (prefix, s))
def assertEndsWith(self, s, suffix):
"""
Assert that C{s} ends with a particular C{suffix}.
@param s: The input string.
@type s: C{str}
@param suffix: The string that C{s} should end with.
@type suffix: C{str}
"""
self.assertTrue(s.endswith(suffix),
'%r is not the end of %r' % (suffix, s))
def assertTracebackFormat(self, tb, prefix, suffix):
"""
Assert that the C{tb} traceback contains a particular C{prefix} and
C{suffix}.
@param tb: The traceback string.
@type tb: C{str}
@param prefix: The string that C{tb} should start with.
@type prefix: C{str}
@param suffix: The string that C{tb} should end with.
@type suffix: C{str}
"""
self.assertStartsWith(tb, prefix)
self.assertEndsWith(tb, suffix)
# MASKED: assertDetailedTraceback function (lines 180-255)
def assertBriefTraceback(self, captureVars=False):
"""
Assert that L{printBriefTraceback} produces and prints a brief
traceback.
The brief traceback consists of a header::
Traceback: <type 'exceptions.ZeroDivisionError'>: float division
The body with the stacktrace::
/twisted/trial/_synctest.py:1180:_run
/twisted/python/util.py:1076:runWithWarningsSuppressed
And the footer::
--- <exception caught here> ---
/twisted/test/test_failure.py:39:getDivisionFailure
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'abcde'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure()
out = NativeStringIO()
f.printBriefTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += '%s:%s:%s\n' % (filename, lineno, method)
zde = repr(ZeroDivisionError)
self.assertTracebackFormat(tb,
"Traceback: %s: " % (zde,),
"%s\n%s" % (failure.EXCEPTION_CAUGHT_HERE, stack))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*abcde', tb))
def assertDefaultTraceback(self, captureVars=False):
"""
Assert that L{printTraceback} produces and prints a default traceback.
The default traceback consists of a header::
Traceback (most recent call last):
The body with traceback::
File "/twisted/trial/_synctest.py", line 1180, in _run
runWithWarningsSuppressed(suppress, method)
And the footer::
--- <exception caught here> ---
File "twisted/test/test_failure.py", line 39, in getDivisionFailure
1/0
exceptions.ZeroDivisionError: float division
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyzzy'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
f.printTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += ' File "%s", line %s, in %s\n' % (filename, lineno,
method)
stack += ' %s\n' % (linecache.getline(
filename, lineno).strip(),)
self.assertTracebackFormat(tb,
"Traceback (most recent call last):",
"%s\n%s%s: %s\n" % (failure.EXCEPTION_CAUGHT_HERE, stack,
reflect.qual(f.type), reflect.safe_str(f.value)))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*xyzzy', tb))
def test_printDetailedTraceback(self):
"""
L{printDetailedTraceback} returns a detailed traceback including the
L{Failure}'s count.
"""
self.assertDetailedTraceback()
def test_printBriefTraceback(self):
"""
L{printBriefTraceback} returns a brief traceback.
"""
self.assertBriefTraceback()
def test_printTraceback(self):
"""
L{printTraceback} returns a traceback.
"""
self.assertDefaultTraceback()
def test_printDetailedTracebackCapturedVars(self):
"""
L{printDetailedTraceback} captures the locals and globals for its
stack frames and adds them to the traceback, when called on a
L{Failure} constructed with C{captureVars=True}.
"""
self.assertDetailedTraceback(captureVars=True)
def test_printBriefTracebackCapturedVars(self):
"""
L{printBriefTraceback} returns a brief traceback when called on a
L{Failure} constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertBriefTraceback(captureVars=True)
def test_printTracebackCapturedVars(self):
"""
L{printTraceback} returns a traceback when called on a L{Failure}
constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertDefaultTraceback(captureVars=True)
def test_printDetailedTracebackCapturedVarsCleaned(self):
"""
C{printDetailedTraceback} includes information about local variables on
the stack after C{cleanFailure} has been called.
"""
self.assertDetailedTraceback(captureVars=True, cleanFailure=True)
def test_invalidFormatFramesDetail(self):
"""
L{failure.format_frames} raises a L{ValueError} if the supplied
C{detail} level is unknown.
"""
self.assertRaises(ValueError, failure.format_frames, None, None,
detail='noisia')
def test_ExplictPass(self):
e = RuntimeError()
f = failure.Failure(e)
f.trap(RuntimeError)
self.assertEqual(f.value, e)
def _getInnermostFrameLine(self, f):
try:
f.raiseException()
except ZeroDivisionError:
tb = traceback.extract_tb(sys.exc_info()[2])
return tb[-1][-1]
else:
raise Exception(
"f.raiseException() didn't raise ZeroDivisionError!?")
def test_RaiseExceptionWithTB(self):
f = getDivisionFailure()
innerline = self._getInnermostFrameLine(f)
self.assertEqual(innerline, '1/0')
def test_stringExceptionConstruction(self):
"""
Constructing a C{Failure} with a string as its exception value raises
a C{TypeError}, as this is no longer supported as of Python 2.6.
"""
exc = self.assertRaises(TypeError, failure.Failure, "ono!")
self.assertIn("Strings are not supported by Failure", str(exc))
def test_ConstructionFails(self):
"""
Creating a Failure with no arguments causes it to try to discover the
current interpreter exception state. If no such state exists, creating
the Failure should raise a synchronous exception.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertRaises(failure.NoCurrentExceptionError, failure.Failure)
def test_getTracebackObject(self):
"""
If the C{Failure} has not been cleaned, then C{getTracebackObject}
returns the traceback object that was captured in its constructor.
"""
f = getDivisionFailure()
self.assertEqual(f.getTracebackObject(), f.tb)
def test_getTracebackObjectFromCaptureVars(self):
"""
C{captureVars=True} has no effect on the result of
C{getTracebackObject}.
"""
try:
1/0
except ZeroDivisionError:
noVarsFailure = failure.Failure()
varsFailure = failure.Failure(captureVars=True)
self.assertEqual(noVarsFailure.getTracebackObject(), varsFailure.tb)
def test_getTracebackObjectFromClean(self):
"""
If the Failure has been cleaned, then C{getTracebackObject} returns an
object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure()
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertIsNotNone(expected)
self.assertEqual(expected, observed)
def test_getTracebackObjectFromCaptureVarsAndClean(self):
"""
If the Failure was created with captureVars, then C{getTracebackObject}
returns an object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure(captureVars=True)
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertEqual(expected, observed)
def test_getTracebackObjectWithoutTraceback(self):
"""
L{failure.Failure}s need not be constructed with traceback objects. If
a C{Failure} has no traceback information at all, C{getTracebackObject}
just returns None.
None is a good value, because traceback.extract_tb(None) -> [].
"""
f = failure.Failure(Exception("some error"))
self.assertIsNone(f.getTracebackObject())
def test_tracebackFromExceptionInPython3(self):
"""
If a L{failure.Failure} is constructed with an exception but no
traceback in Python 3, the traceback will be extracted from the
exception's C{__traceback__} attribute.
"""
try:
1/0
except:
klass, exception, tb = sys.exc_info()
f = failure.Failure(exception)
self.assertIs(f.tb, tb)
def test_cleanFailureRemovesTracebackInPython3(self):
"""
L{failure.Failure.cleanFailure} sets the C{__traceback__} attribute of
the exception to L{None} in Python 3.
"""
f = getDivisionFailure()
self.assertIsNotNone(f.tb)
self.assertIs(f.value.__traceback__, f.tb)
f.cleanFailure()
self.assertIsNone(f.value.__traceback__)
if getattr(BaseException, "__traceback__", None) is None:
test_tracebackFromExceptionInPython3.skip = "Python 3 only."
test_cleanFailureRemovesTracebackInPython3.skip = "Python 3 only."
def test_repr(self):
"""
The C{repr} of a L{failure.Failure} shows the type and string
representation of the underlying exception.
"""
f = getDivisionFailure()
typeName = reflect.fullyQualifiedName(ZeroDivisionError)
self.assertEqual(
repr(f),
'<twisted.python.failure.Failure '
'%s: division by zero>' % (typeName,))
class BrokenStr(Exception):
"""
An exception class the instances of which cannot be presented as strings via
C{str}.
"""
def __str__(self):
# Could raise something else, but there's no point as yet.
raise self
class BrokenExceptionMetaclass(type):
"""
A metaclass for an exception type which cannot be presented as a string via
C{str}.
"""
def __str__(self):
raise ValueError("You cannot make a string out of me.")
class BrokenExceptionType(Exception, object):
"""
The aforementioned exception type which cannot be presented as a string via
C{str}.
"""
__metaclass__ = BrokenExceptionMetaclass
class GetTracebackTests(SynchronousTestCase):
"""
Tests for L{Failure.getTraceback}.
"""
def _brokenValueTest(self, detail):
"""
Construct a L{Failure} with an exception that raises an exception from
its C{__str__} method and then call C{getTraceback} with the specified
detail and verify that it returns a string.
"""
x = BrokenStr()
f = failure.Failure(x)
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenValueBriefDetail(self):
"""
A L{Failure} might wrap an exception with a C{__str__} method which
raises an exception. In this case, calling C{getTraceback} on the
failure with the C{"brief"} detail does not raise an exception.
"""
self._brokenValueTest("brief")
def test_brokenValueDefaultDetail(self):
"""
Like test_brokenValueBriefDetail, but for the C{"default"} detail case.
"""
self._brokenValueTest("default")
def test_brokenValueVerboseDetail(self):
"""
Like test_brokenValueBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenValueTest("verbose")
def _brokenTypeTest(self, detail):
"""
Construct a L{Failure} with an exception type that raises an exception
from its C{__str__} method and then call C{getTraceback} with the
specified detail and verify that it returns a string.
"""
f = failure.Failure(BrokenExceptionType())
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenTypeBriefDetail(self):
"""
A L{Failure} might wrap an exception the type object of which has a
C{__str__} method which raises an exception. In this case, calling
C{getTraceback} on the failure with the C{"brief"} detail does not raise
an exception.
"""
self._brokenTypeTest("brief")
def test_brokenTypeDefaultDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"default"} detail case.
"""
self._brokenTypeTest("default")
def test_brokenTypeVerboseDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenTypeTest("verbose")
class FindFailureTests(SynchronousTestCase):
"""
Tests for functionality related to L{Failure._findFailure}.
"""
def test_findNoFailureInExceptionHandler(self):
"""
Within an exception handler, _findFailure should return
L{None} in case no Failure is associated with the current
exception.
"""
try:
1/0
except:
self.assertIsNone(failure.Failure._findFailure())
else:
self.fail("No exception raised from 1/0!?")
def test_findNoFailure(self):
"""
Outside of an exception handler, _findFailure should return None.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertIsNone(sys.exc_info()[-1]) #environment sanity check
self.assertIsNone(failure.Failure._findFailure())
def test_findFailure(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by raiseException).
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
self.assertEqual(failure.Failure._findFailure(), f)
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
raiseException, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
newF = failure.Failure()
self.assertEqual(f.getTraceback(), newF.getTraceback())
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionWithMungedStackSucceeds(self):
"""
Pyrex and Cython are known to insert fake stack frames so as to give
more Python-like tracebacks. These stack frames with empty code objects
should not break extraction of the exception.
"""
try:
raiser.raiseException()
except raiser.RaiserException:
f = failure.Failure()
self.assertTrue(f.check(raiser.RaiserException))
else:
self.fail("No exception raised from extension?!")
if raiser is None:
skipMsg = "raiser extension not available"
test_failureConstructionWithMungedStackSucceeds.skip = skipMsg
# On Python 3.5, extract_tb returns "FrameSummary" objects, which are almost
# like the old tuples. This being different does not affect the actual tests
# as we are testing that the input works, and that extract_tb returns something
# reasonable.
if sys.version_info < (3, 5):
_tb = lambda fn, lineno, name, text: (fn, lineno, name, text)
else:
from traceback import FrameSummary
_tb = lambda fn, lineno, name, text: FrameSummary(fn, lineno, name)
class FormattableTracebackTests(SynchronousTestCase):
"""
Whitebox tests that show that L{failure._Traceback} constructs objects that
can be used by L{traceback.extract_tb}.
If the objects can be used by L{traceback.extract_tb}, then they can be
formatted using L{traceback.format_tb} and friends.
"""
def test_singleFrame(self):
"""
A C{_Traceback} object constructed with a single frame should be able
to be passed to L{traceback.extract_tb}, and we should get a singleton
list containing a (filename, lineno, methodname, line) tuple.
"""
tb = failure._Traceback([['method', 'filename.py', 123, {}, {}]])
# Note that we don't need to test that extract_tb correctly extracts
# the line's contents. In this case, since filename.py doesn't exist,
# it will just use None.
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method', None)])
def test_manyFrames(self):
"""
A C{_Traceback} object constructed with multiple frames should be able
to be passed to L{traceback.extract_tb}, and we should get a list
containing a tuple for each frame.
"""
tb = failure._Traceback([
['method1', 'filename.py', 123, {}, {}],
['method2', 'filename.py', 235, {}, {}]])
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method1', None),
_tb('filename.py', 235, 'method2', None)])
class FrameAttributesTests(SynchronousTestCase):
"""
_Frame objects should possess some basic attributes that qualify them as
fake python Frame objects.
"""
def test_fakeFrameAttributes(self):
"""
L{_Frame} instances have the C{f_globals} and C{f_locals} attributes
bound to a C{dict} instance. They also have the C{f_code} attribute
bound to something like a code object.
"""
frame = failure._Frame("dummyname", "dummyfilename")
self.assertIsInstance(frame.f_globals, dict)
self.assertIsInstance(frame.f_locals, dict)
self.assertIsInstance(frame.f_code, failure._Code)
class DebugModeTests(SynchronousTestCase):
"""
Failure's debug mode should allow jumping into the debugger.
"""
def setUp(self):
"""
Override pdb.post_mortem so we can make sure it's called.
"""
# Make sure any changes we make are reversed:
post_mortem = pdb.post_mortem
origInit = failure.Failure.__init__
def restore():
pdb.post_mortem = post_mortem
failure.Failure.__init__ = origInit
self.addCleanup(restore)
self.result = []
pdb.post_mortem = self.result.append
failure.startDebugMode()
def test_regularFailure(self):
"""
If startDebugMode() is called, calling Failure() will first call
pdb.post_mortem with the traceback.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure()
self.assertEqual(self.result, [tb])
self.assertFalse(f.captureVars)
def test_captureVars(self):
"""
If startDebugMode() is called, passing captureVars to Failure() will
not blow up.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure(captureVars=True)
self.assertEqual(self.result, [tb])
self.assertTrue(f.captureVars)
class ExtendedGeneratorTests(SynchronousTestCase):
"""
Tests C{failure.Failure} support for generator features added in Python 2.5
"""
def _throwIntoGenerator(self, f, g):
try:
f.throwExceptionIntoGenerator(g)
except StopIteration:
pass
else:
self.fail("throwExceptionIntoGenerator should have raised "
"StopIteration")
def test_throwExceptionIntoGenerator(self):
"""
It should be possible to throw the exception that a Failure
represents into a generator.
"""
stuff = []
def generator():
try:
yield
except:
stuff.append(sys.exc_info())
else:
self.fail("Yield should have yielded exception.")
g = generator()
f = getDivisionFailure()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(stuff[0][0], ZeroDivisionError)
self.assertIsInstance(stuff[0][1], ZeroDivisionError)
self.assertEqual(traceback.extract_tb(stuff[0][2])[-1][-1], "1/0")
def test_findFailureInGenerator(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by throwExceptionIntoGenerator).
"""
f = getDivisionFailure()
f.cleanFailure()
foundFailures = []
def generator():
try:
yield
except:
foundFailures.append(failure.Failure._findFailure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(foundFailures, [f])
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
throwExceptionIntoGenerator, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
newFailures = []
def generator():
try:
yield
except:
newFailures.append(failure.Failure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(len(newFailures), 1)
self.assertEqual(newFailures[0].getTraceback(), f.getTraceback())
def test_ambiguousFailureInGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} inside the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
try:
yield
except:
[][1]
except:
self.assertIsInstance(failure.Failure().value, IndexError)
g = generator()
next(g)
f = getDivisionFailure()
self._throwIntoGenerator(f, g)
def test_ambiguousFailureFromGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} above the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
yield
except:
[][1]
g = generator()
next(g)
f = getDivisionFailure()
try:
self._throwIntoGenerator(f, g)
except:
self.assertIsInstance(failure.Failure().value, IndexError)
|
def assertDetailedTraceback(self, captureVars=False, cleanFailure=False):
"""
Assert that L{printDetailedTraceback} produces and prints a detailed
traceback.
The detailed traceback consists of a header::
*--- Failure #20 ---
The body contains the stacktrace::
/twisted/trial/_synctest.py:1180: _run(...)
/twisted/python/util.py:1076: runWithWarningsSuppressed(...)
--- <exception caught here> ---
/twisted/test/test_failure.py:39: getDivisionFailure(...)
If C{captureVars} is enabled the body also includes a list of
globals and locals::
[ Locals ]
exampleLocalVar : 'xyz'
...
( Globals )
...
Or when C{captureVars} is disabled::
[Capture of Locals and Globals disabled (use captureVars=True)]
When C{cleanFailure} is enabled references to other objects are removed
and replaced with strings.
And finally the footer with the L{Failure}'s value::
exceptions.ZeroDivisionError: float division
*--- End of Failure #20 ---
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
@param cleanFailure: Enables L{Failure.cleanFailure}.
@type cleanFailure: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyz'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
if cleanFailure:
f.cleanFailure()
f.printDetailedTraceback(out)
tb = out.getvalue()
start = "*--- Failure #%d%s---\n" % (f.count,
(f.pickled and ' (pickled) ') or ' ')
end = "%s: %s\n*--- End of Failure #%s ---\n" % (reflect.qual(f.type),
reflect.safe_str(f.value), f.count)
self.assertTracebackFormat(tb, start, end)
# Variables are printed on lines with 2 leading spaces.
linesWithVars = [line for line in tb.splitlines()
if line.startswith(' ')]
if captureVars:
self.assertNotEqual([], linesWithVars)
if cleanFailure:
line = ' exampleLocalVar : "\'xyz\'"'
else:
line = " exampleLocalVar : 'xyz'"
self.assertIn(line, linesWithVars)
else:
self.assertEqual([], linesWithVars)
self.assertIn(' [Capture of Locals and Globals disabled (use '
'captureVars=True)]\n', tb)
| 180 | 255 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for the L{twisted.python.failure} module.
"""
from __future__ import division, absolute_import
import re
import sys
import traceback
import pdb
import linecache
from twisted.python.compat import _PY3, NativeStringIO
from twisted.python import reflect
from twisted.python import failure
from twisted.trial.unittest import SkipTest, SynchronousTestCase
try:
from twisted.test import raiser
except ImportError:
raiser = None
def getDivisionFailure(*args, **kwargs):
"""
Make a C{Failure} of a divide-by-zero error.
@param args: Any C{*args} are passed to Failure's constructor.
@param kwargs: Any C{**kwargs} are passed to Failure's constructor.
"""
try:
1/0
except:
f = failure.Failure(*args, **kwargs)
return f
class FailureTests(SynchronousTestCase):
"""
Tests for L{failure.Failure}.
"""
def test_failAndTrap(self):
"""
Trapping a L{Failure}.
"""
try:
raise NotImplementedError('test')
except:
f = failure.Failure()
error = f.trap(SystemExit, RuntimeError)
self.assertEqual(error, RuntimeError)
self.assertEqual(f.type, NotImplementedError)
def test_trapRaisesWrappedException(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises the wrapped
C{Exception}.
"""
if not _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 3.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(ValueError, f.trap, OverflowError)
self.assertIs(exception, untrapped)
def test_trapRaisesSelf(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises itself.
"""
if _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 2.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(failure.Failure, f.trap, OverflowError)
self.assertIs(f, untrapped)
def test_failureValueFromFailure(self):
"""
A L{failure.Failure} constructed from another
L{failure.Failure} instance has its C{value} property set to
the value of that L{failure.Failure} instance.
"""
exception = ValueError()
f1 = failure.Failure(exception)
f2 = failure.Failure(f1)
self.assertIs(f2.value, exception)
def test_failureValueFromFoundFailure(self):
"""
A L{failure.Failure} constructed without a C{exc_value}
argument will search for an "original" C{Failure}, and if
found, its value will be used as the value for the new
C{Failure}.
"""
exception = ValueError()
f1 = failure.Failure(exception)
try:
f1.trap(OverflowError)
except:
f2 = failure.Failure()
self.assertIs(f2.value, exception)
def assertStartsWith(self, s, prefix):
"""
Assert that C{s} starts with a particular C{prefix}.
@param s: The input string.
@type s: C{str}
@param prefix: The string that C{s} should start with.
@type prefix: C{str}
"""
self.assertTrue(s.startswith(prefix),
'%r is not the start of %r' % (prefix, s))
def assertEndsWith(self, s, suffix):
"""
Assert that C{s} ends with a particular C{suffix}.
@param s: The input string.
@type s: C{str}
@param suffix: The string that C{s} should end with.
@type suffix: C{str}
"""
self.assertTrue(s.endswith(suffix),
'%r is not the end of %r' % (suffix, s))
def assertTracebackFormat(self, tb, prefix, suffix):
"""
Assert that the C{tb} traceback contains a particular C{prefix} and
C{suffix}.
@param tb: The traceback string.
@type tb: C{str}
@param prefix: The string that C{tb} should start with.
@type prefix: C{str}
@param suffix: The string that C{tb} should end with.
@type suffix: C{str}
"""
self.assertStartsWith(tb, prefix)
self.assertEndsWith(tb, suffix)
def assertDetailedTraceback(self, captureVars=False, cleanFailure=False):
"""
Assert that L{printDetailedTraceback} produces and prints a detailed
traceback.
The detailed traceback consists of a header::
*--- Failure #20 ---
The body contains the stacktrace::
/twisted/trial/_synctest.py:1180: _run(...)
/twisted/python/util.py:1076: runWithWarningsSuppressed(...)
--- <exception caught here> ---
/twisted/test/test_failure.py:39: getDivisionFailure(...)
If C{captureVars} is enabled the body also includes a list of
globals and locals::
[ Locals ]
exampleLocalVar : 'xyz'
...
( Globals )
...
Or when C{captureVars} is disabled::
[Capture of Locals and Globals disabled (use captureVars=True)]
When C{cleanFailure} is enabled references to other objects are removed
and replaced with strings.
And finally the footer with the L{Failure}'s value::
exceptions.ZeroDivisionError: float division
*--- End of Failure #20 ---
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
@param cleanFailure: Enables L{Failure.cleanFailure}.
@type cleanFailure: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyz'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
if cleanFailure:
f.cleanFailure()
f.printDetailedTraceback(out)
tb = out.getvalue()
start = "*--- Failure #%d%s---\n" % (f.count,
(f.pickled and ' (pickled) ') or ' ')
end = "%s: %s\n*--- End of Failure #%s ---\n" % (reflect.qual(f.type),
reflect.safe_str(f.value), f.count)
self.assertTracebackFormat(tb, start, end)
# Variables are printed on lines with 2 leading spaces.
linesWithVars = [line for line in tb.splitlines()
if line.startswith(' ')]
if captureVars:
self.assertNotEqual([], linesWithVars)
if cleanFailure:
line = ' exampleLocalVar : "\'xyz\'"'
else:
line = " exampleLocalVar : 'xyz'"
self.assertIn(line, linesWithVars)
else:
self.assertEqual([], linesWithVars)
self.assertIn(' [Capture of Locals and Globals disabled (use '
'captureVars=True)]\n', tb)
def assertBriefTraceback(self, captureVars=False):
"""
Assert that L{printBriefTraceback} produces and prints a brief
traceback.
The brief traceback consists of a header::
Traceback: <type 'exceptions.ZeroDivisionError'>: float division
The body with the stacktrace::
/twisted/trial/_synctest.py:1180:_run
/twisted/python/util.py:1076:runWithWarningsSuppressed
And the footer::
--- <exception caught here> ---
/twisted/test/test_failure.py:39:getDivisionFailure
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'abcde'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure()
out = NativeStringIO()
f.printBriefTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += '%s:%s:%s\n' % (filename, lineno, method)
zde = repr(ZeroDivisionError)
self.assertTracebackFormat(tb,
"Traceback: %s: " % (zde,),
"%s\n%s" % (failure.EXCEPTION_CAUGHT_HERE, stack))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*abcde', tb))
def assertDefaultTraceback(self, captureVars=False):
"""
Assert that L{printTraceback} produces and prints a default traceback.
The default traceback consists of a header::
Traceback (most recent call last):
The body with traceback::
File "/twisted/trial/_synctest.py", line 1180, in _run
runWithWarningsSuppressed(suppress, method)
And the footer::
--- <exception caught here> ---
File "twisted/test/test_failure.py", line 39, in getDivisionFailure
1/0
exceptions.ZeroDivisionError: float division
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyzzy'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
f.printTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += ' File "%s", line %s, in %s\n' % (filename, lineno,
method)
stack += ' %s\n' % (linecache.getline(
filename, lineno).strip(),)
self.assertTracebackFormat(tb,
"Traceback (most recent call last):",
"%s\n%s%s: %s\n" % (failure.EXCEPTION_CAUGHT_HERE, stack,
reflect.qual(f.type), reflect.safe_str(f.value)))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*xyzzy', tb))
def test_printDetailedTraceback(self):
"""
L{printDetailedTraceback} returns a detailed traceback including the
L{Failure}'s count.
"""
self.assertDetailedTraceback()
def test_printBriefTraceback(self):
"""
L{printBriefTraceback} returns a brief traceback.
"""
self.assertBriefTraceback()
def test_printTraceback(self):
"""
L{printTraceback} returns a traceback.
"""
self.assertDefaultTraceback()
def test_printDetailedTracebackCapturedVars(self):
"""
L{printDetailedTraceback} captures the locals and globals for its
stack frames and adds them to the traceback, when called on a
L{Failure} constructed with C{captureVars=True}.
"""
self.assertDetailedTraceback(captureVars=True)
def test_printBriefTracebackCapturedVars(self):
"""
L{printBriefTraceback} returns a brief traceback when called on a
L{Failure} constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertBriefTraceback(captureVars=True)
def test_printTracebackCapturedVars(self):
"""
L{printTraceback} returns a traceback when called on a L{Failure}
constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertDefaultTraceback(captureVars=True)
def test_printDetailedTracebackCapturedVarsCleaned(self):
"""
C{printDetailedTraceback} includes information about local variables on
the stack after C{cleanFailure} has been called.
"""
self.assertDetailedTraceback(captureVars=True, cleanFailure=True)
def test_invalidFormatFramesDetail(self):
"""
L{failure.format_frames} raises a L{ValueError} if the supplied
C{detail} level is unknown.
"""
self.assertRaises(ValueError, failure.format_frames, None, None,
detail='noisia')
def test_ExplictPass(self):
e = RuntimeError()
f = failure.Failure(e)
f.trap(RuntimeError)
self.assertEqual(f.value, e)
def _getInnermostFrameLine(self, f):
try:
f.raiseException()
except ZeroDivisionError:
tb = traceback.extract_tb(sys.exc_info()[2])
return tb[-1][-1]
else:
raise Exception(
"f.raiseException() didn't raise ZeroDivisionError!?")
def test_RaiseExceptionWithTB(self):
f = getDivisionFailure()
innerline = self._getInnermostFrameLine(f)
self.assertEqual(innerline, '1/0')
def test_stringExceptionConstruction(self):
"""
Constructing a C{Failure} with a string as its exception value raises
a C{TypeError}, as this is no longer supported as of Python 2.6.
"""
exc = self.assertRaises(TypeError, failure.Failure, "ono!")
self.assertIn("Strings are not supported by Failure", str(exc))
def test_ConstructionFails(self):
"""
Creating a Failure with no arguments causes it to try to discover the
current interpreter exception state. If no such state exists, creating
the Failure should raise a synchronous exception.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertRaises(failure.NoCurrentExceptionError, failure.Failure)
def test_getTracebackObject(self):
"""
If the C{Failure} has not been cleaned, then C{getTracebackObject}
returns the traceback object that was captured in its constructor.
"""
f = getDivisionFailure()
self.assertEqual(f.getTracebackObject(), f.tb)
def test_getTracebackObjectFromCaptureVars(self):
"""
C{captureVars=True} has no effect on the result of
C{getTracebackObject}.
"""
try:
1/0
except ZeroDivisionError:
noVarsFailure = failure.Failure()
varsFailure = failure.Failure(captureVars=True)
self.assertEqual(noVarsFailure.getTracebackObject(), varsFailure.tb)
def test_getTracebackObjectFromClean(self):
"""
If the Failure has been cleaned, then C{getTracebackObject} returns an
object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure()
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertIsNotNone(expected)
self.assertEqual(expected, observed)
def test_getTracebackObjectFromCaptureVarsAndClean(self):
"""
If the Failure was created with captureVars, then C{getTracebackObject}
returns an object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure(captureVars=True)
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertEqual(expected, observed)
def test_getTracebackObjectWithoutTraceback(self):
"""
L{failure.Failure}s need not be constructed with traceback objects. If
a C{Failure} has no traceback information at all, C{getTracebackObject}
just returns None.
None is a good value, because traceback.extract_tb(None) -> [].
"""
f = failure.Failure(Exception("some error"))
self.assertIsNone(f.getTracebackObject())
def test_tracebackFromExceptionInPython3(self):
"""
If a L{failure.Failure} is constructed with an exception but no
traceback in Python 3, the traceback will be extracted from the
exception's C{__traceback__} attribute.
"""
try:
1/0
except:
klass, exception, tb = sys.exc_info()
f = failure.Failure(exception)
self.assertIs(f.tb, tb)
def test_cleanFailureRemovesTracebackInPython3(self):
"""
L{failure.Failure.cleanFailure} sets the C{__traceback__} attribute of
the exception to L{None} in Python 3.
"""
f = getDivisionFailure()
self.assertIsNotNone(f.tb)
self.assertIs(f.value.__traceback__, f.tb)
f.cleanFailure()
self.assertIsNone(f.value.__traceback__)
if getattr(BaseException, "__traceback__", None) is None:
test_tracebackFromExceptionInPython3.skip = "Python 3 only."
test_cleanFailureRemovesTracebackInPython3.skip = "Python 3 only."
def test_repr(self):
"""
The C{repr} of a L{failure.Failure} shows the type and string
representation of the underlying exception.
"""
f = getDivisionFailure()
typeName = reflect.fullyQualifiedName(ZeroDivisionError)
self.assertEqual(
repr(f),
'<twisted.python.failure.Failure '
'%s: division by zero>' % (typeName,))
class BrokenStr(Exception):
"""
An exception class the instances of which cannot be presented as strings via
C{str}.
"""
def __str__(self):
# Could raise something else, but there's no point as yet.
raise self
class BrokenExceptionMetaclass(type):
"""
A metaclass for an exception type which cannot be presented as a string via
C{str}.
"""
def __str__(self):
raise ValueError("You cannot make a string out of me.")
class BrokenExceptionType(Exception, object):
"""
The aforementioned exception type which cannot be presented as a string via
C{str}.
"""
__metaclass__ = BrokenExceptionMetaclass
class GetTracebackTests(SynchronousTestCase):
"""
Tests for L{Failure.getTraceback}.
"""
def _brokenValueTest(self, detail):
"""
Construct a L{Failure} with an exception that raises an exception from
its C{__str__} method and then call C{getTraceback} with the specified
detail and verify that it returns a string.
"""
x = BrokenStr()
f = failure.Failure(x)
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenValueBriefDetail(self):
"""
A L{Failure} might wrap an exception with a C{__str__} method which
raises an exception. In this case, calling C{getTraceback} on the
failure with the C{"brief"} detail does not raise an exception.
"""
self._brokenValueTest("brief")
def test_brokenValueDefaultDetail(self):
"""
Like test_brokenValueBriefDetail, but for the C{"default"} detail case.
"""
self._brokenValueTest("default")
def test_brokenValueVerboseDetail(self):
"""
Like test_brokenValueBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenValueTest("verbose")
def _brokenTypeTest(self, detail):
"""
Construct a L{Failure} with an exception type that raises an exception
from its C{__str__} method and then call C{getTraceback} with the
specified detail and verify that it returns a string.
"""
f = failure.Failure(BrokenExceptionType())
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenTypeBriefDetail(self):
"""
A L{Failure} might wrap an exception the type object of which has a
C{__str__} method which raises an exception. In this case, calling
C{getTraceback} on the failure with the C{"brief"} detail does not raise
an exception.
"""
self._brokenTypeTest("brief")
def test_brokenTypeDefaultDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"default"} detail case.
"""
self._brokenTypeTest("default")
def test_brokenTypeVerboseDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenTypeTest("verbose")
class FindFailureTests(SynchronousTestCase):
"""
Tests for functionality related to L{Failure._findFailure}.
"""
def test_findNoFailureInExceptionHandler(self):
"""
Within an exception handler, _findFailure should return
L{None} in case no Failure is associated with the current
exception.
"""
try:
1/0
except:
self.assertIsNone(failure.Failure._findFailure())
else:
self.fail("No exception raised from 1/0!?")
def test_findNoFailure(self):
"""
Outside of an exception handler, _findFailure should return None.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertIsNone(sys.exc_info()[-1]) #environment sanity check
self.assertIsNone(failure.Failure._findFailure())
def test_findFailure(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by raiseException).
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
self.assertEqual(failure.Failure._findFailure(), f)
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
raiseException, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
newF = failure.Failure()
self.assertEqual(f.getTraceback(), newF.getTraceback())
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionWithMungedStackSucceeds(self):
"""
Pyrex and Cython are known to insert fake stack frames so as to give
more Python-like tracebacks. These stack frames with empty code objects
should not break extraction of the exception.
"""
try:
raiser.raiseException()
except raiser.RaiserException:
f = failure.Failure()
self.assertTrue(f.check(raiser.RaiserException))
else:
self.fail("No exception raised from extension?!")
if raiser is None:
skipMsg = "raiser extension not available"
test_failureConstructionWithMungedStackSucceeds.skip = skipMsg
# On Python 3.5, extract_tb returns "FrameSummary" objects, which are almost
# like the old tuples. This being different does not affect the actual tests
# as we are testing that the input works, and that extract_tb returns something
# reasonable.
if sys.version_info < (3, 5):
_tb = lambda fn, lineno, name, text: (fn, lineno, name, text)
else:
from traceback import FrameSummary
_tb = lambda fn, lineno, name, text: FrameSummary(fn, lineno, name)
class FormattableTracebackTests(SynchronousTestCase):
"""
Whitebox tests that show that L{failure._Traceback} constructs objects that
can be used by L{traceback.extract_tb}.
If the objects can be used by L{traceback.extract_tb}, then they can be
formatted using L{traceback.format_tb} and friends.
"""
def test_singleFrame(self):
"""
A C{_Traceback} object constructed with a single frame should be able
to be passed to L{traceback.extract_tb}, and we should get a singleton
list containing a (filename, lineno, methodname, line) tuple.
"""
tb = failure._Traceback([['method', 'filename.py', 123, {}, {}]])
# Note that we don't need to test that extract_tb correctly extracts
# the line's contents. In this case, since filename.py doesn't exist,
# it will just use None.
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method', None)])
def test_manyFrames(self):
"""
A C{_Traceback} object constructed with multiple frames should be able
to be passed to L{traceback.extract_tb}, and we should get a list
containing a tuple for each frame.
"""
tb = failure._Traceback([
['method1', 'filename.py', 123, {}, {}],
['method2', 'filename.py', 235, {}, {}]])
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method1', None),
_tb('filename.py', 235, 'method2', None)])
class FrameAttributesTests(SynchronousTestCase):
"""
_Frame objects should possess some basic attributes that qualify them as
fake python Frame objects.
"""
def test_fakeFrameAttributes(self):
"""
L{_Frame} instances have the C{f_globals} and C{f_locals} attributes
bound to a C{dict} instance. They also have the C{f_code} attribute
bound to something like a code object.
"""
frame = failure._Frame("dummyname", "dummyfilename")
self.assertIsInstance(frame.f_globals, dict)
self.assertIsInstance(frame.f_locals, dict)
self.assertIsInstance(frame.f_code, failure._Code)
class DebugModeTests(SynchronousTestCase):
"""
Failure's debug mode should allow jumping into the debugger.
"""
def setUp(self):
"""
Override pdb.post_mortem so we can make sure it's called.
"""
# Make sure any changes we make are reversed:
post_mortem = pdb.post_mortem
origInit = failure.Failure.__init__
def restore():
pdb.post_mortem = post_mortem
failure.Failure.__init__ = origInit
self.addCleanup(restore)
self.result = []
pdb.post_mortem = self.result.append
failure.startDebugMode()
def test_regularFailure(self):
"""
If startDebugMode() is called, calling Failure() will first call
pdb.post_mortem with the traceback.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure()
self.assertEqual(self.result, [tb])
self.assertFalse(f.captureVars)
def test_captureVars(self):
"""
If startDebugMode() is called, passing captureVars to Failure() will
not blow up.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure(captureVars=True)
self.assertEqual(self.result, [tb])
self.assertTrue(f.captureVars)
class ExtendedGeneratorTests(SynchronousTestCase):
"""
Tests C{failure.Failure} support for generator features added in Python 2.5
"""
def _throwIntoGenerator(self, f, g):
try:
f.throwExceptionIntoGenerator(g)
except StopIteration:
pass
else:
self.fail("throwExceptionIntoGenerator should have raised "
"StopIteration")
def test_throwExceptionIntoGenerator(self):
"""
It should be possible to throw the exception that a Failure
represents into a generator.
"""
stuff = []
def generator():
try:
yield
except:
stuff.append(sys.exc_info())
else:
self.fail("Yield should have yielded exception.")
g = generator()
f = getDivisionFailure()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(stuff[0][0], ZeroDivisionError)
self.assertIsInstance(stuff[0][1], ZeroDivisionError)
self.assertEqual(traceback.extract_tb(stuff[0][2])[-1][-1], "1/0")
def test_findFailureInGenerator(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by throwExceptionIntoGenerator).
"""
f = getDivisionFailure()
f.cleanFailure()
foundFailures = []
def generator():
try:
yield
except:
foundFailures.append(failure.Failure._findFailure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(foundFailures, [f])
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
throwExceptionIntoGenerator, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
newFailures = []
def generator():
try:
yield
except:
newFailures.append(failure.Failure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(len(newFailures), 1)
self.assertEqual(newFailures[0].getTraceback(), f.getTraceback())
def test_ambiguousFailureInGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} inside the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
try:
yield
except:
[][1]
except:
self.assertIsInstance(failure.Failure().value, IndexError)
g = generator()
next(g)
f = getDivisionFailure()
self._throwIntoGenerator(f, g)
def test_ambiguousFailureFromGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} above the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
yield
except:
[][1]
g = generator()
next(g)
f = getDivisionFailure()
try:
self._throwIntoGenerator(f, g)
except:
self.assertIsInstance(failure.Failure().value, IndexError)
|
assertBriefTraceback
|
Assert that L{printBriefTraceback} produces and prints a brief
traceback.
The brief traceback consists of a header::
Traceback: <type 'exceptions.ZeroDivisionError'>: float division
The body with the stacktrace::
/twisted/trial/_synctest.py:1180:_run
/twisted/python/util.py:1076:runWithWarningsSuppressed
And the footer::
--- <exception caught here> ---
/twisted/test/test_failure.py:39:getDivisionFailure
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
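A short sketch of producing this output (an added illustration; it uses the
module's C{getDivisionFailure} helper and L{Failure.printBriefTraceback})::
    from io import StringIO
    f = getDivisionFailure()
    out = StringIO()
    f.printBriefTraceback(out)
    print(out.getvalue())  # "Traceback: <type>: <value>" plus one line per frame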
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for the L{twisted.python.failure} module.
"""
from __future__ import division, absolute_import
import re
import sys
import traceback
import pdb
import linecache
from twisted.python.compat import _PY3, NativeStringIO
from twisted.python import reflect
from twisted.python import failure
from twisted.trial.unittest import SkipTest, SynchronousTestCase
try:
from twisted.test import raiser
except ImportError:
raiser = None
def getDivisionFailure(*args, **kwargs):
"""
Make a C{Failure} of a divide-by-zero error.
@param args: Any C{*args} are passed to Failure's constructor.
@param kwargs: Any C{**kwargs} are passed to Failure's constructor.
"""
try:
1/0
except:
f = failure.Failure(*args, **kwargs)
return f
class FailureTests(SynchronousTestCase):
"""
Tests for L{failure.Failure}.
"""
def test_failAndTrap(self):
"""
Trapping a L{Failure}.
"""
try:
raise NotImplementedError('test')
except:
f = failure.Failure()
error = f.trap(SystemExit, RuntimeError)
self.assertEqual(error, RuntimeError)
self.assertEqual(f.type, NotImplementedError)
def test_trapRaisesWrappedException(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises the wrapped
C{Exception}.
"""
if not _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 3.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(ValueError, f.trap, OverflowError)
self.assertIs(exception, untrapped)
def test_trapRaisesSelf(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises itself.
"""
if _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 2.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(failure.Failure, f.trap, OverflowError)
self.assertIs(f, untrapped)
def test_failureValueFromFailure(self):
"""
A L{failure.Failure} constructed from another
L{failure.Failure} instance has its C{value} property set to
the value of that L{failure.Failure} instance.
"""
exception = ValueError()
f1 = failure.Failure(exception)
f2 = failure.Failure(f1)
self.assertIs(f2.value, exception)
def test_failureValueFromFoundFailure(self):
"""
A L{failure.Failure} constructed without a C{exc_value}
argument will search for an "original" C{Failure}, and if
found, its value will be used as the value for the new
C{Failure}.
"""
exception = ValueError()
f1 = failure.Failure(exception)
try:
f1.trap(OverflowError)
except:
f2 = failure.Failure()
self.assertIs(f2.value, exception)
def assertStartsWith(self, s, prefix):
"""
Assert that C{s} starts with a particular C{prefix}.
@param s: The input string.
@type s: C{str}
@param prefix: The string that C{s} should start with.
@type prefix: C{str}
"""
self.assertTrue(s.startswith(prefix),
'%r is not the start of %r' % (prefix, s))
def assertEndsWith(self, s, suffix):
"""
Assert that C{s} ends with a particular C{suffix}.
@param s: The input string.
@type s: C{str}
@param suffix: The string that C{s} should end with.
@type suffix: C{str}
"""
self.assertTrue(s.endswith(suffix),
'%r is not the end of %r' % (suffix, s))
def assertTracebackFormat(self, tb, prefix, suffix):
"""
Assert that the C{tb} traceback contains a particular C{prefix} and
C{suffix}.
@param tb: The traceback string.
@type tb: C{str}
@param prefix: The string that C{tb} should start with.
@type prefix: C{str}
@param suffix: The string that C{tb} should end with.
@type suffix: C{str}
"""
self.assertStartsWith(tb, prefix)
self.assertEndsWith(tb, suffix)
def assertDetailedTraceback(self, captureVars=False, cleanFailure=False):
"""
Assert that L{printDetailedTraceback} produces and prints a detailed
traceback.
The detailed traceback consists of a header::
*--- Failure #20 ---
The body contains the stacktrace::
/twisted/trial/_synctest.py:1180: _run(...)
/twisted/python/util.py:1076: runWithWarningsSuppressed(...)
--- <exception caught here> ---
/twisted/test/test_failure.py:39: getDivisionFailure(...)
If C{captureVars} is enabled the body also includes a list of
globals and locals::
[ Locals ]
exampleLocalVar : 'xyz'
...
( Globals )
...
Or when C{captureVars} is disabled::
[Capture of Locals and Globals disabled (use captureVars=True)]
When C{cleanFailure} is enabled references to other objects are removed
and replaced with strings.
And finally the footer with the L{Failure}'s value::
exceptions.ZeroDivisionError: float division
*--- End of Failure #20 ---
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
@param cleanFailure: Enables L{Failure.cleanFailure}.
@type cleanFailure: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyz'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
if cleanFailure:
f.cleanFailure()
f.printDetailedTraceback(out)
tb = out.getvalue()
start = "*--- Failure #%d%s---\n" % (f.count,
(f.pickled and ' (pickled) ') or ' ')
end = "%s: %s\n*--- End of Failure #%s ---\n" % (reflect.qual(f.type),
reflect.safe_str(f.value), f.count)
self.assertTracebackFormat(tb, start, end)
# Variables are printed on lines with 2 leading spaces.
linesWithVars = [line for line in tb.splitlines()
if line.startswith(' ')]
if captureVars:
self.assertNotEqual([], linesWithVars)
if cleanFailure:
line = ' exampleLocalVar : "\'xyz\'"'
else:
line = " exampleLocalVar : 'xyz'"
self.assertIn(line, linesWithVars)
else:
self.assertEqual([], linesWithVars)
self.assertIn(' [Capture of Locals and Globals disabled (use '
'captureVars=True)]\n', tb)
# MASKED: assertBriefTraceback function (lines 258-300)
def assertDefaultTraceback(self, captureVars=False):
"""
Assert that L{printTraceback} produces and prints a default traceback.
The default traceback consists of a header::
Traceback (most recent call last):
The body with traceback::
File "/twisted/trial/_synctest.py", line 1180, in _run
runWithWarningsSuppressed(suppress, method)
And the footer::
--- <exception caught here> ---
File "twisted/test/test_failure.py", line 39, in getDivisionFailure
1/0
exceptions.ZeroDivisionError: float division
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyzzy'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
f.printTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += ' File "%s", line %s, in %s\n' % (filename, lineno,
method)
stack += ' %s\n' % (linecache.getline(
filename, lineno).strip(),)
self.assertTracebackFormat(tb,
"Traceback (most recent call last):",
"%s\n%s%s: %s\n" % (failure.EXCEPTION_CAUGHT_HERE, stack,
reflect.qual(f.type), reflect.safe_str(f.value)))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*xyzzy', tb))
def test_printDetailedTraceback(self):
"""
L{printDetailedTraceback} returns a detailed traceback including the
L{Failure}'s count.
"""
self.assertDetailedTraceback()
def test_printBriefTraceback(self):
"""
L{printBriefTraceback} returns a brief traceback.
"""
self.assertBriefTraceback()
def test_printTraceback(self):
"""
L{printTraceback} returns a traceback.
"""
self.assertDefaultTraceback()
def test_printDetailedTracebackCapturedVars(self):
"""
L{printDetailedTraceback} captures the locals and globals for its
stack frames and adds them to the traceback, when called on a
L{Failure} constructed with C{captureVars=True}.
"""
self.assertDetailedTraceback(captureVars=True)
def test_printBriefTracebackCapturedVars(self):
"""
L{printBriefTraceback} returns a brief traceback when called on a
L{Failure} constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertBriefTraceback(captureVars=True)
def test_printTracebackCapturedVars(self):
"""
L{printTraceback} returns a traceback when called on a L{Failure}
constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertDefaultTraceback(captureVars=True)
def test_printDetailedTracebackCapturedVarsCleaned(self):
"""
C{printDetailedTraceback} includes information about local variables on
the stack after C{cleanFailure} has been called.
"""
self.assertDetailedTraceback(captureVars=True, cleanFailure=True)
def test_invalidFormatFramesDetail(self):
"""
L{failure.format_frames} raises a L{ValueError} if the supplied
C{detail} level is unknown.
"""
self.assertRaises(ValueError, failure.format_frames, None, None,
detail='noisia')
def test_ExplictPass(self):
e = RuntimeError()
f = failure.Failure(e)
f.trap(RuntimeError)
self.assertEqual(f.value, e)
def _getInnermostFrameLine(self, f):
try:
f.raiseException()
except ZeroDivisionError:
tb = traceback.extract_tb(sys.exc_info()[2])
return tb[-1][-1]
else:
raise Exception(
"f.raiseException() didn't raise ZeroDivisionError!?")
def test_RaiseExceptionWithTB(self):
f = getDivisionFailure()
innerline = self._getInnermostFrameLine(f)
self.assertEqual(innerline, '1/0')
def test_stringExceptionConstruction(self):
"""
Constructing a C{Failure} with a string as its exception value raises
a C{TypeError}, as this is no longer supported as of Python 2.6.
"""
exc = self.assertRaises(TypeError, failure.Failure, "ono!")
self.assertIn("Strings are not supported by Failure", str(exc))
def test_ConstructionFails(self):
"""
Creating a Failure with no arguments causes it to try to discover the
current interpreter exception state. If no such state exists, creating
the Failure should raise a synchronous exception.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertRaises(failure.NoCurrentExceptionError, failure.Failure)
def test_getTracebackObject(self):
"""
If the C{Failure} has not been cleaned, then C{getTracebackObject}
        returns the traceback object that it captured in its constructor.
"""
f = getDivisionFailure()
self.assertEqual(f.getTracebackObject(), f.tb)
def test_getTracebackObjectFromCaptureVars(self):
"""
C{captureVars=True} has no effect on the result of
C{getTracebackObject}.
"""
try:
1/0
except ZeroDivisionError:
noVarsFailure = failure.Failure()
varsFailure = failure.Failure(captureVars=True)
self.assertEqual(noVarsFailure.getTracebackObject(), varsFailure.tb)
def test_getTracebackObjectFromClean(self):
"""
If the Failure has been cleaned, then C{getTracebackObject} returns an
object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure()
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertIsNotNone(expected)
self.assertEqual(expected, observed)
def test_getTracebackObjectFromCaptureVarsAndClean(self):
"""
If the Failure was created with captureVars, then C{getTracebackObject}
returns an object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure(captureVars=True)
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertEqual(expected, observed)
def test_getTracebackObjectWithoutTraceback(self):
"""
L{failure.Failure}s need not be constructed with traceback objects. If
a C{Failure} has no traceback information at all, C{getTracebackObject}
just returns None.
None is a good value, because traceback.extract_tb(None) -> [].
"""
f = failure.Failure(Exception("some error"))
self.assertIsNone(f.getTracebackObject())
def test_tracebackFromExceptionInPython3(self):
"""
If a L{failure.Failure} is constructed with an exception but no
traceback in Python 3, the traceback will be extracted from the
exception's C{__traceback__} attribute.
"""
try:
1/0
except:
klass, exception, tb = sys.exc_info()
f = failure.Failure(exception)
self.assertIs(f.tb, tb)
def test_cleanFailureRemovesTracebackInPython3(self):
"""
L{failure.Failure.cleanFailure} sets the C{__traceback__} attribute of
the exception to L{None} in Python 3.
"""
f = getDivisionFailure()
self.assertIsNotNone(f.tb)
self.assertIs(f.value.__traceback__, f.tb)
f.cleanFailure()
self.assertIsNone(f.value.__traceback__)
if getattr(BaseException, "__traceback__", None) is None:
test_tracebackFromExceptionInPython3.skip = "Python 3 only."
test_cleanFailureRemovesTracebackInPython3.skip = "Python 3 only."
def test_repr(self):
"""
The C{repr} of a L{failure.Failure} shows the type and string
representation of the underlying exception.
"""
f = getDivisionFailure()
typeName = reflect.fullyQualifiedName(ZeroDivisionError)
self.assertEqual(
repr(f),
'<twisted.python.failure.Failure '
'%s: division by zero>' % (typeName,))
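# --- Illustrative sketch (not part of the original Twisted source) ---
# The FailureTests above exercise Failure.trap() and the three traceback
# printers. A minimal usage sketch, assuming only the public
# twisted.python.failure API already imported in this module; the helper
# name below is hypothetical.
def _example_trap_and_print():
    try:
        1 / 0
    except ZeroDivisionError:
        f = failure.Failure()
    # trap() returns the expected type that matched, and re-raises otherwise.
    matched = f.trap(ValueError, ZeroDivisionError)
    assert matched is ZeroDivisionError
    out = NativeStringIO()
    f.printBriefTraceback(out)     # one "file:line:method" entry per frame
    f.printTraceback(out)          # stdlib-style "Traceback (most recent call last):"
    f.printDetailedTraceback(out)  # "*--- Failure #N ---" header/footer
    return out.getvalue()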
class BrokenStr(Exception):
"""
An exception class the instances of which cannot be presented as strings via
C{str}.
"""
def __str__(self):
# Could raise something else, but there's no point as yet.
raise self
class BrokenExceptionMetaclass(type):
"""
A metaclass for an exception type which cannot be presented as a string via
C{str}.
"""
def __str__(self):
raise ValueError("You cannot make a string out of me.")
class BrokenExceptionType(Exception, object):
"""
    The aforementioned exception type which cannot be presented as a string via
C{str}.
"""
__metaclass__ = BrokenExceptionMetaclass
class GetTracebackTests(SynchronousTestCase):
"""
Tests for L{Failure.getTraceback}.
"""
def _brokenValueTest(self, detail):
"""
Construct a L{Failure} with an exception that raises an exception from
its C{__str__} method and then call C{getTraceback} with the specified
detail and verify that it returns a string.
"""
x = BrokenStr()
f = failure.Failure(x)
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenValueBriefDetail(self):
"""
A L{Failure} might wrap an exception with a C{__str__} method which
raises an exception. In this case, calling C{getTraceback} on the
failure with the C{"brief"} detail does not raise an exception.
"""
self._brokenValueTest("brief")
def test_brokenValueDefaultDetail(self):
"""
Like test_brokenValueBriefDetail, but for the C{"default"} detail case.
"""
self._brokenValueTest("default")
def test_brokenValueVerboseDetail(self):
"""
        Like test_brokenValueBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenValueTest("verbose")
def _brokenTypeTest(self, detail):
"""
Construct a L{Failure} with an exception type that raises an exception
from its C{__str__} method and then call C{getTraceback} with the
specified detail and verify that it returns a string.
"""
f = failure.Failure(BrokenExceptionType())
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenTypeBriefDetail(self):
"""
A L{Failure} might wrap an exception the type object of which has a
C{__str__} method which raises an exception. In this case, calling
C{getTraceback} on the failure with the C{"brief"} detail does not raise
an exception.
"""
self._brokenTypeTest("brief")
def test_brokenTypeDefaultDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"default"} detail case.
"""
self._brokenTypeTest("default")
def test_brokenTypeVerboseDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenTypeTest("verbose")
class FindFailureTests(SynchronousTestCase):
"""
Tests for functionality related to L{Failure._findFailure}.
"""
def test_findNoFailureInExceptionHandler(self):
"""
Within an exception handler, _findFailure should return
L{None} in case no Failure is associated with the current
exception.
"""
try:
1/0
except:
self.assertIsNone(failure.Failure._findFailure())
else:
self.fail("No exception raised from 1/0!?")
def test_findNoFailure(self):
"""
Outside of an exception handler, _findFailure should return None.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertIsNone(sys.exc_info()[-1]) #environment sanity check
self.assertIsNone(failure.Failure._findFailure())
def test_findFailure(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by raiseException).
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
self.assertEqual(failure.Failure._findFailure(), f)
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
raiseException, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
newF = failure.Failure()
self.assertEqual(f.getTraceback(), newF.getTraceback())
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionWithMungedStackSucceeds(self):
"""
Pyrex and Cython are known to insert fake stack frames so as to give
more Python-like tracebacks. These stack frames with empty code objects
should not break extraction of the exception.
"""
try:
raiser.raiseException()
except raiser.RaiserException:
f = failure.Failure()
self.assertTrue(f.check(raiser.RaiserException))
else:
self.fail("No exception raised from extension?!")
if raiser is None:
skipMsg = "raiser extension not available"
test_failureConstructionWithMungedStackSucceeds.skip = skipMsg
# On Python 3.5, extract_tb returns "FrameSummary" objects, which are almost
# like the old tuples. This being different does not affect the actual tests
# as we are testing that the input works, and that extract_tb returns something
# reasonable.
if sys.version_info < (3, 5):
_tb = lambda fn, lineno, name, text: (fn, lineno, name, text)
else:
from traceback import FrameSummary
_tb = lambda fn, lineno, name, text: FrameSummary(fn, lineno, name)
class FormattableTracebackTests(SynchronousTestCase):
"""
Whitebox tests that show that L{failure._Traceback} constructs objects that
can be used by L{traceback.extract_tb}.
If the objects can be used by L{traceback.extract_tb}, then they can be
formatted using L{traceback.format_tb} and friends.
"""
def test_singleFrame(self):
"""
A C{_Traceback} object constructed with a single frame should be able
to be passed to L{traceback.extract_tb}, and we should get a singleton
list containing a (filename, lineno, methodname, line) tuple.
"""
tb = failure._Traceback([['method', 'filename.py', 123, {}, {}]])
# Note that we don't need to test that extract_tb correctly extracts
# the line's contents. In this case, since filename.py doesn't exist,
# it will just use None.
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method', None)])
def test_manyFrames(self):
"""
A C{_Traceback} object constructed with multiple frames should be able
to be passed to L{traceback.extract_tb}, and we should get a list
containing a tuple for each frame.
"""
tb = failure._Traceback([
['method1', 'filename.py', 123, {}, {}],
['method2', 'filename.py', 235, {}, {}]])
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method1', None),
_tb('filename.py', 235, 'method2', None)])
class FrameAttributesTests(SynchronousTestCase):
"""
_Frame objects should possess some basic attributes that qualify them as
fake python Frame objects.
"""
def test_fakeFrameAttributes(self):
"""
L{_Frame} instances have the C{f_globals} and C{f_locals} attributes
bound to C{dict} instance. They also have the C{f_code} attribute
bound to something like a code object.
"""
frame = failure._Frame("dummyname", "dummyfilename")
self.assertIsInstance(frame.f_globals, dict)
self.assertIsInstance(frame.f_locals, dict)
self.assertIsInstance(frame.f_code, failure._Code)
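# --- Illustrative sketch (not part of the original Twisted source) ---
# FormattableTracebackTests/FrameAttributesTests above show that the private
# failure._Traceback/_Frame fakes are accepted by the stdlib traceback module.
# A minimal sketch, assuming the single-argument _Traceback signature used in
# the tests above; the frame data and helper name are hypothetical.
def _example_format_fake_traceback():
    frames = [
        ['caller', 'app.py', 10, {}, {}],
        ['callee', 'lib.py', 42, {}, {}],
    ]
    tb = failure._Traceback(frames)
    # extract_tb/format_list treat the fake object like a real traceback.
    return traceback.format_list(traceback.extract_tb(tb))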
class DebugModeTests(SynchronousTestCase):
"""
Failure's debug mode should allow jumping into the debugger.
"""
def setUp(self):
"""
Override pdb.post_mortem so we can make sure it's called.
"""
# Make sure any changes we make are reversed:
post_mortem = pdb.post_mortem
origInit = failure.Failure.__init__
def restore():
pdb.post_mortem = post_mortem
failure.Failure.__init__ = origInit
self.addCleanup(restore)
self.result = []
pdb.post_mortem = self.result.append
failure.startDebugMode()
def test_regularFailure(self):
"""
If startDebugMode() is called, calling Failure() will first call
pdb.post_mortem with the traceback.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure()
self.assertEqual(self.result, [tb])
self.assertFalse(f.captureVars)
def test_captureVars(self):
"""
If startDebugMode() is called, passing captureVars to Failure() will
not blow up.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure(captureVars=True)
self.assertEqual(self.result, [tb])
self.assertTrue(f.captureVars)
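# --- Illustrative sketch (not part of the original Twisted source) ---
# DebugModeTests above verify that startDebugMode() makes Failure() hand the
# live traceback to pdb.post_mortem before capturing it. A minimal sketch
# using the same monkey-patch-and-restore approach as setUp(); the helper
# name is hypothetical.
def _example_record_post_mortem_calls():
    recorded = []
    original_post_mortem = pdb.post_mortem
    original_init = failure.Failure.__init__
    pdb.post_mortem = recorded.append
    try:
        failure.startDebugMode()
        try:
            1 / 0
        except ZeroDivisionError:
            failure.Failure()
    finally:
        # Undo the global patches, as the tests above do via addCleanup.
        pdb.post_mortem = original_post_mortem
        failure.Failure.__init__ = original_init
    return recorded  # one traceback object per Failure constructed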
class ExtendedGeneratorTests(SynchronousTestCase):
"""
Tests C{failure.Failure} support for generator features added in Python 2.5
"""
def _throwIntoGenerator(self, f, g):
try:
f.throwExceptionIntoGenerator(g)
except StopIteration:
pass
else:
self.fail("throwExceptionIntoGenerator should have raised "
"StopIteration")
def test_throwExceptionIntoGenerator(self):
"""
It should be possible to throw the exception that a Failure
represents into a generator.
"""
stuff = []
def generator():
try:
yield
except:
stuff.append(sys.exc_info())
else:
self.fail("Yield should have yielded exception.")
g = generator()
f = getDivisionFailure()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(stuff[0][0], ZeroDivisionError)
self.assertIsInstance(stuff[0][1], ZeroDivisionError)
self.assertEqual(traceback.extract_tb(stuff[0][2])[-1][-1], "1/0")
def test_findFailureInGenerator(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by throwExceptionIntoGenerator).
"""
f = getDivisionFailure()
f.cleanFailure()
foundFailures = []
def generator():
try:
yield
except:
foundFailures.append(failure.Failure._findFailure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(foundFailures, [f])
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
throwExceptionIntoGenerator, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
newFailures = []
def generator():
try:
yield
except:
newFailures.append(failure.Failure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(len(newFailures), 1)
self.assertEqual(newFailures[0].getTraceback(), f.getTraceback())
def test_ambiguousFailureInGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} inside the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
try:
yield
except:
[][1]
except:
self.assertIsInstance(failure.Failure().value, IndexError)
g = generator()
next(g)
f = getDivisionFailure()
self._throwIntoGenerator(f, g)
def test_ambiguousFailureFromGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} above the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
yield
except:
[][1]
g = generator()
next(g)
f = getDivisionFailure()
try:
self._throwIntoGenerator(f, g)
except:
self.assertIsInstance(failure.Failure().value, IndexError)
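# --- Illustrative sketch (not part of the original Twisted source) ---
# ExtendedGeneratorTests above cover throwExceptionIntoGenerator(), which
# re-raises the wrapped exception inside a suspended generator (the mechanism
# Deferred-based coroutines use to receive failures). A minimal sketch using
# the module-level getDivisionFailure() helper; the function name below is
# hypothetical.
def _example_throw_into_generator():
    caught = []
    def consumer():
        try:
            yield
        except ZeroDivisionError as e:
            caught.append(e)
    g = consumer()
    next(g)  # run up to the yield so the generator can receive the throw
    f = getDivisionFailure()
    try:
        f.throwExceptionIntoGenerator(g)
    except StopIteration:
        pass  # the generator finished after handling the exception
    return caught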
|
def assertBriefTraceback(self, captureVars=False):
"""
Assert that L{printBriefTraceback} produces and prints a brief
traceback.
The brief traceback consists of a header::
Traceback: <type 'exceptions.ZeroDivisionError'>: float division
The body with the stacktrace::
/twisted/trial/_synctest.py:1180:_run
/twisted/python/util.py:1076:runWithWarningsSuppressed
And the footer::
--- <exception caught here> ---
/twisted/test/test_failure.py:39:getDivisionFailure
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'abcde'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure()
out = NativeStringIO()
f.printBriefTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += '%s:%s:%s\n' % (filename, lineno, method)
zde = repr(ZeroDivisionError)
self.assertTracebackFormat(tb,
"Traceback: %s: " % (zde,),
"%s\n%s" % (failure.EXCEPTION_CAUGHT_HERE, stack))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*abcde', tb))
| 258 | 300 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for the L{twisted.python.failure} module.
"""
from __future__ import division, absolute_import
import re
import sys
import traceback
import pdb
import linecache
from twisted.python.compat import _PY3, NativeStringIO
from twisted.python import reflect
from twisted.python import failure
from twisted.trial.unittest import SkipTest, SynchronousTestCase
try:
from twisted.test import raiser
except ImportError:
raiser = None
def getDivisionFailure(*args, **kwargs):
"""
Make a C{Failure} of a divide-by-zero error.
@param args: Any C{*args} are passed to Failure's constructor.
@param kwargs: Any C{**kwargs} are passed to Failure's constructor.
"""
try:
1/0
except:
f = failure.Failure(*args, **kwargs)
return f
class FailureTests(SynchronousTestCase):
"""
Tests for L{failure.Failure}.
"""
def test_failAndTrap(self):
"""
Trapping a L{Failure}.
"""
try:
raise NotImplementedError('test')
except:
f = failure.Failure()
error = f.trap(SystemExit, RuntimeError)
self.assertEqual(error, RuntimeError)
self.assertEqual(f.type, NotImplementedError)
def test_trapRaisesWrappedException(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises the wrapped
C{Exception}.
"""
if not _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 3.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(ValueError, f.trap, OverflowError)
self.assertIs(exception, untrapped)
def test_trapRaisesSelf(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises itself.
"""
if _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 2.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(failure.Failure, f.trap, OverflowError)
self.assertIs(f, untrapped)
def test_failureValueFromFailure(self):
"""
A L{failure.Failure} constructed from another
        L{failure.Failure} instance has its C{value} property set to
the value of that L{failure.Failure} instance.
"""
exception = ValueError()
f1 = failure.Failure(exception)
f2 = failure.Failure(f1)
self.assertIs(f2.value, exception)
def test_failureValueFromFoundFailure(self):
"""
        A L{failure.Failure} constructed without an C{exc_value}
        argument will search for an "original" C{Failure}, and if
found, its value will be used as the value for the new
C{Failure}.
"""
exception = ValueError()
f1 = failure.Failure(exception)
try:
f1.trap(OverflowError)
except:
f2 = failure.Failure()
self.assertIs(f2.value, exception)
def assertStartsWith(self, s, prefix):
"""
Assert that C{s} starts with a particular C{prefix}.
@param s: The input string.
@type s: C{str}
@param prefix: The string that C{s} should start with.
@type prefix: C{str}
"""
self.assertTrue(s.startswith(prefix),
'%r is not the start of %r' % (prefix, s))
def assertEndsWith(self, s, suffix):
"""
        Assert that C{s} ends with a particular C{suffix}.
@param s: The input string.
@type s: C{str}
@param suffix: The string that C{s} should end with.
@type suffix: C{str}
"""
self.assertTrue(s.endswith(suffix),
'%r is not the end of %r' % (suffix, s))
def assertTracebackFormat(self, tb, prefix, suffix):
"""
Assert that the C{tb} traceback contains a particular C{prefix} and
C{suffix}.
@param tb: The traceback string.
@type tb: C{str}
@param prefix: The string that C{tb} should start with.
@type prefix: C{str}
@param suffix: The string that C{tb} should end with.
@type suffix: C{str}
"""
self.assertStartsWith(tb, prefix)
self.assertEndsWith(tb, suffix)
def assertDetailedTraceback(self, captureVars=False, cleanFailure=False):
"""
Assert that L{printDetailedTraceback} produces and prints a detailed
traceback.
The detailed traceback consists of a header::
*--- Failure #20 ---
The body contains the stacktrace::
/twisted/trial/_synctest.py:1180: _run(...)
/twisted/python/util.py:1076: runWithWarningsSuppressed(...)
--- <exception caught here> ---
/twisted/test/test_failure.py:39: getDivisionFailure(...)
If C{captureVars} is enabled the body also includes a list of
globals and locals::
[ Locals ]
exampleLocalVar : 'xyz'
...
( Globals )
...
Or when C{captureVars} is disabled::
[Capture of Locals and Globals disabled (use captureVars=True)]
When C{cleanFailure} is enabled references to other objects are removed
and replaced with strings.
And finally the footer with the L{Failure}'s value::
exceptions.ZeroDivisionError: float division
*--- End of Failure #20 ---
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
@param cleanFailure: Enables L{Failure.cleanFailure}.
@type cleanFailure: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyz'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
if cleanFailure:
f.cleanFailure()
f.printDetailedTraceback(out)
tb = out.getvalue()
start = "*--- Failure #%d%s---\n" % (f.count,
(f.pickled and ' (pickled) ') or ' ')
end = "%s: %s\n*--- End of Failure #%s ---\n" % (reflect.qual(f.type),
reflect.safe_str(f.value), f.count)
self.assertTracebackFormat(tb, start, end)
# Variables are printed on lines with 2 leading spaces.
linesWithVars = [line for line in tb.splitlines()
if line.startswith(' ')]
if captureVars:
self.assertNotEqual([], linesWithVars)
if cleanFailure:
line = ' exampleLocalVar : "\'xyz\'"'
else:
line = " exampleLocalVar : 'xyz'"
self.assertIn(line, linesWithVars)
else:
self.assertEqual([], linesWithVars)
self.assertIn(' [Capture of Locals and Globals disabled (use '
'captureVars=True)]\n', tb)
def assertBriefTraceback(self, captureVars=False):
"""
Assert that L{printBriefTraceback} produces and prints a brief
traceback.
The brief traceback consists of a header::
Traceback: <type 'exceptions.ZeroDivisionError'>: float division
The body with the stacktrace::
/twisted/trial/_synctest.py:1180:_run
/twisted/python/util.py:1076:runWithWarningsSuppressed
And the footer::
--- <exception caught here> ---
/twisted/test/test_failure.py:39:getDivisionFailure
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'abcde'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure()
out = NativeStringIO()
f.printBriefTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += '%s:%s:%s\n' % (filename, lineno, method)
zde = repr(ZeroDivisionError)
self.assertTracebackFormat(tb,
"Traceback: %s: " % (zde,),
"%s\n%s" % (failure.EXCEPTION_CAUGHT_HERE, stack))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*abcde', tb))
def assertDefaultTraceback(self, captureVars=False):
"""
Assert that L{printTraceback} produces and prints a default traceback.
The default traceback consists of a header::
Traceback (most recent call last):
The body with traceback::
File "/twisted/trial/_synctest.py", line 1180, in _run
runWithWarningsSuppressed(suppress, method)
And the footer::
--- <exception caught here> ---
File "twisted/test/test_failure.py", line 39, in getDivisionFailure
1/0
exceptions.ZeroDivisionError: float division
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyzzy'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
f.printTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += ' File "%s", line %s, in %s\n' % (filename, lineno,
method)
stack += ' %s\n' % (linecache.getline(
filename, lineno).strip(),)
self.assertTracebackFormat(tb,
"Traceback (most recent call last):",
"%s\n%s%s: %s\n" % (failure.EXCEPTION_CAUGHT_HERE, stack,
reflect.qual(f.type), reflect.safe_str(f.value)))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*xyzzy', tb))
def test_printDetailedTraceback(self):
"""
L{printDetailedTraceback} returns a detailed traceback including the
L{Failure}'s count.
"""
self.assertDetailedTraceback()
def test_printBriefTraceback(self):
"""
L{printBriefTraceback} returns a brief traceback.
"""
self.assertBriefTraceback()
def test_printTraceback(self):
"""
L{printTraceback} returns a traceback.
"""
self.assertDefaultTraceback()
def test_printDetailedTracebackCapturedVars(self):
"""
L{printDetailedTraceback} captures the locals and globals for its
stack frames and adds them to the traceback, when called on a
L{Failure} constructed with C{captureVars=True}.
"""
self.assertDetailedTraceback(captureVars=True)
def test_printBriefTracebackCapturedVars(self):
"""
L{printBriefTraceback} returns a brief traceback when called on a
L{Failure} constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertBriefTraceback(captureVars=True)
def test_printTracebackCapturedVars(self):
"""
L{printTraceback} returns a traceback when called on a L{Failure}
constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertDefaultTraceback(captureVars=True)
def test_printDetailedTracebackCapturedVarsCleaned(self):
"""
C{printDetailedTraceback} includes information about local variables on
the stack after C{cleanFailure} has been called.
"""
self.assertDetailedTraceback(captureVars=True, cleanFailure=True)
def test_invalidFormatFramesDetail(self):
"""
L{failure.format_frames} raises a L{ValueError} if the supplied
C{detail} level is unknown.
"""
self.assertRaises(ValueError, failure.format_frames, None, None,
detail='noisia')
def test_ExplictPass(self):
e = RuntimeError()
f = failure.Failure(e)
f.trap(RuntimeError)
self.assertEqual(f.value, e)
def _getInnermostFrameLine(self, f):
try:
f.raiseException()
except ZeroDivisionError:
tb = traceback.extract_tb(sys.exc_info()[2])
return tb[-1][-1]
else:
raise Exception(
"f.raiseException() didn't raise ZeroDivisionError!?")
def test_RaiseExceptionWithTB(self):
f = getDivisionFailure()
innerline = self._getInnermostFrameLine(f)
self.assertEqual(innerline, '1/0')
def test_stringExceptionConstruction(self):
"""
Constructing a C{Failure} with a string as its exception value raises
a C{TypeError}, as this is no longer supported as of Python 2.6.
"""
exc = self.assertRaises(TypeError, failure.Failure, "ono!")
self.assertIn("Strings are not supported by Failure", str(exc))
def test_ConstructionFails(self):
"""
Creating a Failure with no arguments causes it to try to discover the
current interpreter exception state. If no such state exists, creating
the Failure should raise a synchronous exception.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertRaises(failure.NoCurrentExceptionError, failure.Failure)
def test_getTracebackObject(self):
"""
If the C{Failure} has not been cleaned, then C{getTracebackObject}
        returns the traceback object that it captured in its constructor.
"""
f = getDivisionFailure()
self.assertEqual(f.getTracebackObject(), f.tb)
def test_getTracebackObjectFromCaptureVars(self):
"""
C{captureVars=True} has no effect on the result of
C{getTracebackObject}.
"""
try:
1/0
except ZeroDivisionError:
noVarsFailure = failure.Failure()
varsFailure = failure.Failure(captureVars=True)
self.assertEqual(noVarsFailure.getTracebackObject(), varsFailure.tb)
def test_getTracebackObjectFromClean(self):
"""
If the Failure has been cleaned, then C{getTracebackObject} returns an
object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure()
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertIsNotNone(expected)
self.assertEqual(expected, observed)
def test_getTracebackObjectFromCaptureVarsAndClean(self):
"""
If the Failure was created with captureVars, then C{getTracebackObject}
returns an object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure(captureVars=True)
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertEqual(expected, observed)
def test_getTracebackObjectWithoutTraceback(self):
"""
L{failure.Failure}s need not be constructed with traceback objects. If
a C{Failure} has no traceback information at all, C{getTracebackObject}
just returns None.
None is a good value, because traceback.extract_tb(None) -> [].
"""
f = failure.Failure(Exception("some error"))
self.assertIsNone(f.getTracebackObject())
def test_tracebackFromExceptionInPython3(self):
"""
If a L{failure.Failure} is constructed with an exception but no
traceback in Python 3, the traceback will be extracted from the
exception's C{__traceback__} attribute.
"""
try:
1/0
except:
klass, exception, tb = sys.exc_info()
f = failure.Failure(exception)
self.assertIs(f.tb, tb)
def test_cleanFailureRemovesTracebackInPython3(self):
"""
L{failure.Failure.cleanFailure} sets the C{__traceback__} attribute of
the exception to L{None} in Python 3.
"""
f = getDivisionFailure()
self.assertIsNotNone(f.tb)
self.assertIs(f.value.__traceback__, f.tb)
f.cleanFailure()
self.assertIsNone(f.value.__traceback__)
if getattr(BaseException, "__traceback__", None) is None:
test_tracebackFromExceptionInPython3.skip = "Python 3 only."
test_cleanFailureRemovesTracebackInPython3.skip = "Python 3 only."
def test_repr(self):
"""
The C{repr} of a L{failure.Failure} shows the type and string
representation of the underlying exception.
"""
f = getDivisionFailure()
typeName = reflect.fullyQualifiedName(ZeroDivisionError)
self.assertEqual(
repr(f),
'<twisted.python.failure.Failure '
'%s: division by zero>' % (typeName,))
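# --- Illustrative sketch (not part of the original Twisted source) ---
# The getTracebackObject* tests above show that cleanFailure() swaps the live
# traceback for picklable fake frames while keeping getTracebackObject()
# compatible with the stdlib traceback module. A minimal sketch; the helper
# name is hypothetical.
def _example_clean_failure_keeps_extractable_traceback():
    f = getDivisionFailure()
    before = traceback.extract_tb(f.getTracebackObject())
    f.cleanFailure()  # drop frame references (and, on Python 3, value.__traceback__)
    after = traceback.extract_tb(f.getTracebackObject())
    return before == after  # True, per test_getTracebackObjectFromClean above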
class BrokenStr(Exception):
"""
An exception class the instances of which cannot be presented as strings via
C{str}.
"""
def __str__(self):
# Could raise something else, but there's no point as yet.
raise self
class BrokenExceptionMetaclass(type):
"""
A metaclass for an exception type which cannot be presented as a string via
C{str}.
"""
def __str__(self):
raise ValueError("You cannot make a string out of me.")
class BrokenExceptionType(Exception, object):
"""
    The aforementioned exception type which cannot be presented as a string via
C{str}.
"""
__metaclass__ = BrokenExceptionMetaclass
class GetTracebackTests(SynchronousTestCase):
"""
Tests for L{Failure.getTraceback}.
"""
def _brokenValueTest(self, detail):
"""
Construct a L{Failure} with an exception that raises an exception from
its C{__str__} method and then call C{getTraceback} with the specified
detail and verify that it returns a string.
"""
x = BrokenStr()
f = failure.Failure(x)
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenValueBriefDetail(self):
"""
A L{Failure} might wrap an exception with a C{__str__} method which
raises an exception. In this case, calling C{getTraceback} on the
failure with the C{"brief"} detail does not raise an exception.
"""
self._brokenValueTest("brief")
def test_brokenValueDefaultDetail(self):
"""
Like test_brokenValueBriefDetail, but for the C{"default"} detail case.
"""
self._brokenValueTest("default")
def test_brokenValueVerboseDetail(self):
"""
        Like test_brokenValueBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenValueTest("verbose")
def _brokenTypeTest(self, detail):
"""
Construct a L{Failure} with an exception type that raises an exception
from its C{__str__} method and then call C{getTraceback} with the
specified detail and verify that it returns a string.
"""
f = failure.Failure(BrokenExceptionType())
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenTypeBriefDetail(self):
"""
A L{Failure} might wrap an exception the type object of which has a
C{__str__} method which raises an exception. In this case, calling
C{getTraceback} on the failure with the C{"brief"} detail does not raise
an exception.
"""
self._brokenTypeTest("brief")
def test_brokenTypeDefaultDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"default"} detail case.
"""
self._brokenTypeTest("default")
def test_brokenTypeVerboseDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenTypeTest("verbose")
class FindFailureTests(SynchronousTestCase):
"""
Tests for functionality related to L{Failure._findFailure}.
"""
def test_findNoFailureInExceptionHandler(self):
"""
Within an exception handler, _findFailure should return
L{None} in case no Failure is associated with the current
exception.
"""
try:
1/0
except:
self.assertIsNone(failure.Failure._findFailure())
else:
self.fail("No exception raised from 1/0!?")
def test_findNoFailure(self):
"""
Outside of an exception handler, _findFailure should return None.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertIsNone(sys.exc_info()[-1]) #environment sanity check
self.assertIsNone(failure.Failure._findFailure())
def test_findFailure(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by raiseException).
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
self.assertEqual(failure.Failure._findFailure(), f)
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
raiseException, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
newF = failure.Failure()
self.assertEqual(f.getTraceback(), newF.getTraceback())
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionWithMungedStackSucceeds(self):
"""
Pyrex and Cython are known to insert fake stack frames so as to give
more Python-like tracebacks. These stack frames with empty code objects
should not break extraction of the exception.
"""
try:
raiser.raiseException()
except raiser.RaiserException:
f = failure.Failure()
self.assertTrue(f.check(raiser.RaiserException))
else:
self.fail("No exception raised from extension?!")
if raiser is None:
skipMsg = "raiser extension not available"
test_failureConstructionWithMungedStackSucceeds.skip = skipMsg
# On Python 3.5, extract_tb returns "FrameSummary" objects, which are almost
# like the old tuples. This being different does not affect the actual tests
# as we are testing that the input works, and that extract_tb returns something
# reasonable.
if sys.version_info < (3, 5):
_tb = lambda fn, lineno, name, text: (fn, lineno, name, text)
else:
from traceback import FrameSummary
_tb = lambda fn, lineno, name, text: FrameSummary(fn, lineno, name)
class FormattableTracebackTests(SynchronousTestCase):
"""
Whitebox tests that show that L{failure._Traceback} constructs objects that
can be used by L{traceback.extract_tb}.
If the objects can be used by L{traceback.extract_tb}, then they can be
formatted using L{traceback.format_tb} and friends.
"""
def test_singleFrame(self):
"""
A C{_Traceback} object constructed with a single frame should be able
to be passed to L{traceback.extract_tb}, and we should get a singleton
list containing a (filename, lineno, methodname, line) tuple.
"""
tb = failure._Traceback([['method', 'filename.py', 123, {}, {}]])
# Note that we don't need to test that extract_tb correctly extracts
# the line's contents. In this case, since filename.py doesn't exist,
# it will just use None.
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method', None)])
def test_manyFrames(self):
"""
A C{_Traceback} object constructed with multiple frames should be able
to be passed to L{traceback.extract_tb}, and we should get a list
containing a tuple for each frame.
"""
tb = failure._Traceback([
['method1', 'filename.py', 123, {}, {}],
['method2', 'filename.py', 235, {}, {}]])
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method1', None),
_tb('filename.py', 235, 'method2', None)])
class FrameAttributesTests(SynchronousTestCase):
"""
_Frame objects should possess some basic attributes that qualify them as
fake python Frame objects.
"""
def test_fakeFrameAttributes(self):
"""
L{_Frame} instances have the C{f_globals} and C{f_locals} attributes
bound to C{dict} instance. They also have the C{f_code} attribute
bound to something like a code object.
"""
frame = failure._Frame("dummyname", "dummyfilename")
self.assertIsInstance(frame.f_globals, dict)
self.assertIsInstance(frame.f_locals, dict)
self.assertIsInstance(frame.f_code, failure._Code)
class DebugModeTests(SynchronousTestCase):
"""
Failure's debug mode should allow jumping into the debugger.
"""
def setUp(self):
"""
Override pdb.post_mortem so we can make sure it's called.
"""
# Make sure any changes we make are reversed:
post_mortem = pdb.post_mortem
origInit = failure.Failure.__init__
def restore():
pdb.post_mortem = post_mortem
failure.Failure.__init__ = origInit
self.addCleanup(restore)
self.result = []
pdb.post_mortem = self.result.append
failure.startDebugMode()
def test_regularFailure(self):
"""
If startDebugMode() is called, calling Failure() will first call
pdb.post_mortem with the traceback.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure()
self.assertEqual(self.result, [tb])
self.assertFalse(f.captureVars)
def test_captureVars(self):
"""
If startDebugMode() is called, passing captureVars to Failure() will
not blow up.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure(captureVars=True)
self.assertEqual(self.result, [tb])
self.assertTrue(f.captureVars)
class ExtendedGeneratorTests(SynchronousTestCase):
"""
Tests C{failure.Failure} support for generator features added in Python 2.5
"""
def _throwIntoGenerator(self, f, g):
try:
f.throwExceptionIntoGenerator(g)
except StopIteration:
pass
else:
self.fail("throwExceptionIntoGenerator should have raised "
"StopIteration")
def test_throwExceptionIntoGenerator(self):
"""
It should be possible to throw the exception that a Failure
represents into a generator.
"""
stuff = []
def generator():
try:
yield
except:
stuff.append(sys.exc_info())
else:
self.fail("Yield should have yielded exception.")
g = generator()
f = getDivisionFailure()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(stuff[0][0], ZeroDivisionError)
self.assertIsInstance(stuff[0][1], ZeroDivisionError)
self.assertEqual(traceback.extract_tb(stuff[0][2])[-1][-1], "1/0")
def test_findFailureInGenerator(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by throwExceptionIntoGenerator).
"""
f = getDivisionFailure()
f.cleanFailure()
foundFailures = []
def generator():
try:
yield
except:
foundFailures.append(failure.Failure._findFailure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(foundFailures, [f])
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
throwExceptionIntoGenerator, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
newFailures = []
def generator():
try:
yield
except:
newFailures.append(failure.Failure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(len(newFailures), 1)
self.assertEqual(newFailures[0].getTraceback(), f.getTraceback())
def test_ambiguousFailureInGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} inside the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
try:
yield
except:
[][1]
except:
self.assertIsInstance(failure.Failure().value, IndexError)
g = generator()
next(g)
f = getDivisionFailure()
self._throwIntoGenerator(f, g)
def test_ambiguousFailureFromGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} above the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
yield
except:
[][1]
g = generator()
next(g)
f = getDivisionFailure()
try:
self._throwIntoGenerator(f, g)
except:
self.assertIsInstance(failure.Failure().value, IndexError)
|
assertDefaultTraceback
|
Assert that L{printTraceback} produces and prints a default traceback.
The default traceback consists of a header::
Traceback (most recent call last):
The body with traceback::
File "/twisted/trial/_synctest.py", line 1180, in _run
runWithWarningsSuppressed(suppress, method)
And the footer::
--- <exception caught here> ---
File "twisted/test/test_failure.py", line 39, in getDivisionFailure
1/0
exceptions.ZeroDivisionError: float division
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for the L{twisted.python.failure} module.
"""
from __future__ import division, absolute_import
import re
import sys
import traceback
import pdb
import linecache
from twisted.python.compat import _PY3, NativeStringIO
from twisted.python import reflect
from twisted.python import failure
from twisted.trial.unittest import SkipTest, SynchronousTestCase
try:
from twisted.test import raiser
except ImportError:
raiser = None
def getDivisionFailure(*args, **kwargs):
"""
Make a C{Failure} of a divide-by-zero error.
@param args: Any C{*args} are passed to Failure's constructor.
@param kwargs: Any C{**kwargs} are passed to Failure's constructor.
"""
try:
1/0
except:
f = failure.Failure(*args, **kwargs)
return f
class FailureTests(SynchronousTestCase):
"""
Tests for L{failure.Failure}.
"""
def test_failAndTrap(self):
"""
Trapping a L{Failure}.
"""
try:
raise NotImplementedError('test')
except:
f = failure.Failure()
error = f.trap(SystemExit, RuntimeError)
self.assertEqual(error, RuntimeError)
self.assertEqual(f.type, NotImplementedError)
def test_trapRaisesWrappedException(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises the wrapped
C{Exception}.
"""
if not _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 3.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(ValueError, f.trap, OverflowError)
self.assertIs(exception, untrapped)
def test_trapRaisesSelf(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises itself.
"""
if _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 2.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(failure.Failure, f.trap, OverflowError)
self.assertIs(f, untrapped)
def test_failureValueFromFailure(self):
"""
A L{failure.Failure} constructed from another
        L{failure.Failure} instance has its C{value} property set to
the value of that L{failure.Failure} instance.
"""
exception = ValueError()
f1 = failure.Failure(exception)
f2 = failure.Failure(f1)
self.assertIs(f2.value, exception)
def test_failureValueFromFoundFailure(self):
"""
        A L{failure.Failure} constructed without an C{exc_value}
        argument will search for an "original" C{Failure}, and if
found, its value will be used as the value for the new
C{Failure}.
"""
exception = ValueError()
f1 = failure.Failure(exception)
try:
f1.trap(OverflowError)
except:
f2 = failure.Failure()
self.assertIs(f2.value, exception)
def assertStartsWith(self, s, prefix):
"""
Assert that C{s} starts with a particular C{prefix}.
@param s: The input string.
@type s: C{str}
@param prefix: The string that C{s} should start with.
@type prefix: C{str}
"""
self.assertTrue(s.startswith(prefix),
'%r is not the start of %r' % (prefix, s))
def assertEndsWith(self, s, suffix):
"""
        Assert that C{s} ends with a particular C{suffix}.
@param s: The input string.
@type s: C{str}
@param suffix: The string that C{s} should end with.
@type suffix: C{str}
"""
self.assertTrue(s.endswith(suffix),
'%r is not the end of %r' % (suffix, s))
def assertTracebackFormat(self, tb, prefix, suffix):
"""
Assert that the C{tb} traceback contains a particular C{prefix} and
C{suffix}.
@param tb: The traceback string.
@type tb: C{str}
@param prefix: The string that C{tb} should start with.
@type prefix: C{str}
@param suffix: The string that C{tb} should end with.
@type suffix: C{str}
"""
self.assertStartsWith(tb, prefix)
self.assertEndsWith(tb, suffix)
def assertDetailedTraceback(self, captureVars=False, cleanFailure=False):
"""
Assert that L{printDetailedTraceback} produces and prints a detailed
traceback.
The detailed traceback consists of a header::
*--- Failure #20 ---
The body contains the stacktrace::
/twisted/trial/_synctest.py:1180: _run(...)
/twisted/python/util.py:1076: runWithWarningsSuppressed(...)
--- <exception caught here> ---
/twisted/test/test_failure.py:39: getDivisionFailure(...)
If C{captureVars} is enabled the body also includes a list of
globals and locals::
[ Locals ]
exampleLocalVar : 'xyz'
...
( Globals )
...
Or when C{captureVars} is disabled::
[Capture of Locals and Globals disabled (use captureVars=True)]
When C{cleanFailure} is enabled references to other objects are removed
and replaced with strings.
And finally the footer with the L{Failure}'s value::
exceptions.ZeroDivisionError: float division
*--- End of Failure #20 ---
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
@param cleanFailure: Enables L{Failure.cleanFailure}.
@type cleanFailure: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyz'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
if cleanFailure:
f.cleanFailure()
f.printDetailedTraceback(out)
tb = out.getvalue()
start = "*--- Failure #%d%s---\n" % (f.count,
(f.pickled and ' (pickled) ') or ' ')
end = "%s: %s\n*--- End of Failure #%s ---\n" % (reflect.qual(f.type),
reflect.safe_str(f.value), f.count)
self.assertTracebackFormat(tb, start, end)
# Variables are printed on lines with 2 leading spaces.
linesWithVars = [line for line in tb.splitlines()
if line.startswith(' ')]
if captureVars:
self.assertNotEqual([], linesWithVars)
if cleanFailure:
line = ' exampleLocalVar : "\'xyz\'"'
else:
line = " exampleLocalVar : 'xyz'"
self.assertIn(line, linesWithVars)
else:
self.assertEqual([], linesWithVars)
self.assertIn(' [Capture of Locals and Globals disabled (use '
'captureVars=True)]\n', tb)
def assertBriefTraceback(self, captureVars=False):
"""
Assert that L{printBriefTraceback} produces and prints a brief
traceback.
The brief traceback consists of a header::
Traceback: <type 'exceptions.ZeroDivisionError'>: float division
The body with the stacktrace::
/twisted/trial/_synctest.py:1180:_run
/twisted/python/util.py:1076:runWithWarningsSuppressed
And the footer::
--- <exception caught here> ---
/twisted/test/test_failure.py:39:getDivisionFailure
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'abcde'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure()
out = NativeStringIO()
f.printBriefTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += '%s:%s:%s\n' % (filename, lineno, method)
zde = repr(ZeroDivisionError)
self.assertTracebackFormat(tb,
"Traceback: %s: " % (zde,),
"%s\n%s" % (failure.EXCEPTION_CAUGHT_HERE, stack))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*abcde', tb))
# MASKED: assertDefaultTraceback function (lines 303-349)
def test_printDetailedTraceback(self):
"""
L{printDetailedTraceback} returns a detailed traceback including the
L{Failure}'s count.
"""
self.assertDetailedTraceback()
def test_printBriefTraceback(self):
"""
L{printBriefTraceback} returns a brief traceback.
"""
self.assertBriefTraceback()
def test_printTraceback(self):
"""
L{printTraceback} returns a traceback.
"""
self.assertDefaultTraceback()
def test_printDetailedTracebackCapturedVars(self):
"""
L{printDetailedTraceback} captures the locals and globals for its
stack frames and adds them to the traceback, when called on a
L{Failure} constructed with C{captureVars=True}.
"""
self.assertDetailedTraceback(captureVars=True)
def test_printBriefTracebackCapturedVars(self):
"""
L{printBriefTraceback} returns a brief traceback when called on a
L{Failure} constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertBriefTraceback(captureVars=True)
def test_printTracebackCapturedVars(self):
"""
L{printTraceback} returns a traceback when called on a L{Failure}
constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertDefaultTraceback(captureVars=True)
def test_printDetailedTracebackCapturedVarsCleaned(self):
"""
C{printDetailedTraceback} includes information about local variables on
the stack after C{cleanFailure} has been called.
"""
self.assertDetailedTraceback(captureVars=True, cleanFailure=True)
def test_invalidFormatFramesDetail(self):
"""
L{failure.format_frames} raises a L{ValueError} if the supplied
C{detail} level is unknown.
"""
self.assertRaises(ValueError, failure.format_frames, None, None,
detail='noisia')
def test_ExplictPass(self):
e = RuntimeError()
f = failure.Failure(e)
f.trap(RuntimeError)
self.assertEqual(f.value, e)
def _getInnermostFrameLine(self, f):
try:
f.raiseException()
except ZeroDivisionError:
tb = traceback.extract_tb(sys.exc_info()[2])
return tb[-1][-1]
else:
raise Exception(
"f.raiseException() didn't raise ZeroDivisionError!?")
def test_RaiseExceptionWithTB(self):
f = getDivisionFailure()
innerline = self._getInnermostFrameLine(f)
self.assertEqual(innerline, '1/0')
def test_stringExceptionConstruction(self):
"""
Constructing a C{Failure} with a string as its exception value raises
a C{TypeError}, as this is no longer supported as of Python 2.6.
"""
exc = self.assertRaises(TypeError, failure.Failure, "ono!")
self.assertIn("Strings are not supported by Failure", str(exc))
def test_ConstructionFails(self):
"""
Creating a Failure with no arguments causes it to try to discover the
current interpreter exception state. If no such state exists, creating
the Failure should raise a synchronous exception.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertRaises(failure.NoCurrentExceptionError, failure.Failure)
def test_getTracebackObject(self):
"""
If the C{Failure} has not been cleaned, then C{getTracebackObject}
        returns the traceback object that it captured in its constructor.
"""
f = getDivisionFailure()
self.assertEqual(f.getTracebackObject(), f.tb)
def test_getTracebackObjectFromCaptureVars(self):
"""
C{captureVars=True} has no effect on the result of
C{getTracebackObject}.
"""
try:
1/0
except ZeroDivisionError:
noVarsFailure = failure.Failure()
varsFailure = failure.Failure(captureVars=True)
self.assertEqual(noVarsFailure.getTracebackObject(), varsFailure.tb)
def test_getTracebackObjectFromClean(self):
"""
If the Failure has been cleaned, then C{getTracebackObject} returns an
object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure()
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertIsNotNone(expected)
self.assertEqual(expected, observed)
def test_getTracebackObjectFromCaptureVarsAndClean(self):
"""
If the Failure was created with captureVars, then C{getTracebackObject}
returns an object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure(captureVars=True)
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertEqual(expected, observed)
def test_getTracebackObjectWithoutTraceback(self):
"""
L{failure.Failure}s need not be constructed with traceback objects. If
a C{Failure} has no traceback information at all, C{getTracebackObject}
just returns None.
None is a good value, because traceback.extract_tb(None) -> [].
"""
f = failure.Failure(Exception("some error"))
self.assertIsNone(f.getTracebackObject())
def test_tracebackFromExceptionInPython3(self):
"""
If a L{failure.Failure} is constructed with an exception but no
traceback in Python 3, the traceback will be extracted from the
exception's C{__traceback__} attribute.
"""
try:
1/0
except:
klass, exception, tb = sys.exc_info()
f = failure.Failure(exception)
self.assertIs(f.tb, tb)
def test_cleanFailureRemovesTracebackInPython3(self):
"""
L{failure.Failure.cleanFailure} sets the C{__traceback__} attribute of
the exception to L{None} in Python 3.
"""
f = getDivisionFailure()
self.assertIsNotNone(f.tb)
self.assertIs(f.value.__traceback__, f.tb)
f.cleanFailure()
self.assertIsNone(f.value.__traceback__)
if getattr(BaseException, "__traceback__", None) is None:
test_tracebackFromExceptionInPython3.skip = "Python 3 only."
test_cleanFailureRemovesTracebackInPython3.skip = "Python 3 only."
def test_repr(self):
"""
The C{repr} of a L{failure.Failure} shows the type and string
representation of the underlying exception.
"""
f = getDivisionFailure()
typeName = reflect.fullyQualifiedName(ZeroDivisionError)
self.assertEqual(
repr(f),
'<twisted.python.failure.Failure '
'%s: division by zero>' % (typeName,))
class BrokenStr(Exception):
"""
An exception class the instances of which cannot be presented as strings via
C{str}.
"""
def __str__(self):
# Could raise something else, but there's no point as yet.
raise self
class BrokenExceptionMetaclass(type):
"""
A metaclass for an exception type which cannot be presented as a string via
C{str}.
"""
def __str__(self):
raise ValueError("You cannot make a string out of me.")
class BrokenExceptionType(Exception, object):
"""
    The aforementioned exception type which cannot be presented as a string via
C{str}.
"""
__metaclass__ = BrokenExceptionMetaclass
class GetTracebackTests(SynchronousTestCase):
"""
Tests for L{Failure.getTraceback}.
"""
def _brokenValueTest(self, detail):
"""
Construct a L{Failure} with an exception that raises an exception from
its C{__str__} method and then call C{getTraceback} with the specified
detail and verify that it returns a string.
"""
x = BrokenStr()
f = failure.Failure(x)
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenValueBriefDetail(self):
"""
A L{Failure} might wrap an exception with a C{__str__} method which
raises an exception. In this case, calling C{getTraceback} on the
failure with the C{"brief"} detail does not raise an exception.
"""
self._brokenValueTest("brief")
def test_brokenValueDefaultDetail(self):
"""
Like test_brokenValueBriefDetail, but for the C{"default"} detail case.
"""
self._brokenValueTest("default")
def test_brokenValueVerboseDetail(self):
"""
        Like test_brokenValueBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenValueTest("verbose")
def _brokenTypeTest(self, detail):
"""
Construct a L{Failure} with an exception type that raises an exception
from its C{__str__} method and then call C{getTraceback} with the
specified detail and verify that it returns a string.
"""
f = failure.Failure(BrokenExceptionType())
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenTypeBriefDetail(self):
"""
A L{Failure} might wrap an exception the type object of which has a
C{__str__} method which raises an exception. In this case, calling
C{getTraceback} on the failure with the C{"brief"} detail does not raise
an exception.
"""
self._brokenTypeTest("brief")
def test_brokenTypeDefaultDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"default"} detail case.
"""
self._brokenTypeTest("default")
def test_brokenTypeVerboseDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenTypeTest("verbose")
class FindFailureTests(SynchronousTestCase):
"""
Tests for functionality related to L{Failure._findFailure}.
"""
def test_findNoFailureInExceptionHandler(self):
"""
Within an exception handler, _findFailure should return
L{None} in case no Failure is associated with the current
exception.
"""
try:
1/0
except:
self.assertIsNone(failure.Failure._findFailure())
else:
self.fail("No exception raised from 1/0!?")
def test_findNoFailure(self):
"""
Outside of an exception handler, _findFailure should return None.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertIsNone(sys.exc_info()[-1]) #environment sanity check
self.assertIsNone(failure.Failure._findFailure())
def test_findFailure(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by raiseException).
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
self.assertEqual(failure.Failure._findFailure(), f)
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
raiseException, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
newF = failure.Failure()
self.assertEqual(f.getTraceback(), newF.getTraceback())
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionWithMungedStackSucceeds(self):
"""
Pyrex and Cython are known to insert fake stack frames so as to give
more Python-like tracebacks. These stack frames with empty code objects
should not break extraction of the exception.
"""
try:
raiser.raiseException()
except raiser.RaiserException:
f = failure.Failure()
self.assertTrue(f.check(raiser.RaiserException))
else:
self.fail("No exception raised from extension?!")
if raiser is None:
skipMsg = "raiser extension not available"
test_failureConstructionWithMungedStackSucceeds.skip = skipMsg
# On Python 3.5, extract_tb returns "FrameSummary" objects, which are almost
# like the old tuples. This being different does not affect the actual tests
# as we are testing that the input works, and that extract_tb returns something
# reasonable.
if sys.version_info < (3, 5):
_tb = lambda fn, lineno, name, text: (fn, lineno, name, text)
else:
from traceback import FrameSummary
_tb = lambda fn, lineno, name, text: FrameSummary(fn, lineno, name)
class FormattableTracebackTests(SynchronousTestCase):
"""
Whitebox tests that show that L{failure._Traceback} constructs objects that
can be used by L{traceback.extract_tb}.
If the objects can be used by L{traceback.extract_tb}, then they can be
formatted using L{traceback.format_tb} and friends.
"""
def test_singleFrame(self):
"""
A C{_Traceback} object constructed with a single frame should be able
to be passed to L{traceback.extract_tb}, and we should get a singleton
list containing a (filename, lineno, methodname, line) tuple.
"""
tb = failure._Traceback([['method', 'filename.py', 123, {}, {}]])
# Note that we don't need to test that extract_tb correctly extracts
# the line's contents. In this case, since filename.py doesn't exist,
# it will just use None.
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method', None)])
def test_manyFrames(self):
"""
A C{_Traceback} object constructed with multiple frames should be able
to be passed to L{traceback.extract_tb}, and we should get a list
containing a tuple for each frame.
"""
tb = failure._Traceback([
['method1', 'filename.py', 123, {}, {}],
['method2', 'filename.py', 235, {}, {}]])
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method1', None),
_tb('filename.py', 235, 'method2', None)])
class FrameAttributesTests(SynchronousTestCase):
"""
_Frame objects should possess some basic attributes that qualify them as
fake python Frame objects.
"""
def test_fakeFrameAttributes(self):
"""
L{_Frame} instances have the C{f_globals} and C{f_locals} attributes
bound to C{dict} instance. They also have the C{f_code} attribute
bound to something like a code object.
"""
frame = failure._Frame("dummyname", "dummyfilename")
self.assertIsInstance(frame.f_globals, dict)
self.assertIsInstance(frame.f_locals, dict)
self.assertIsInstance(frame.f_code, failure._Code)
class DebugModeTests(SynchronousTestCase):
"""
Failure's debug mode should allow jumping into the debugger.
"""
def setUp(self):
"""
Override pdb.post_mortem so we can make sure it's called.
"""
# Make sure any changes we make are reversed:
post_mortem = pdb.post_mortem
origInit = failure.Failure.__init__
def restore():
pdb.post_mortem = post_mortem
failure.Failure.__init__ = origInit
self.addCleanup(restore)
self.result = []
pdb.post_mortem = self.result.append
failure.startDebugMode()
def test_regularFailure(self):
"""
If startDebugMode() is called, calling Failure() will first call
pdb.post_mortem with the traceback.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure()
self.assertEqual(self.result, [tb])
self.assertFalse(f.captureVars)
def test_captureVars(self):
"""
If startDebugMode() is called, passing captureVars to Failure() will
not blow up.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure(captureVars=True)
self.assertEqual(self.result, [tb])
self.assertTrue(f.captureVars)
class ExtendedGeneratorTests(SynchronousTestCase):
"""
Tests C{failure.Failure} support for generator features added in Python 2.5
"""
def _throwIntoGenerator(self, f, g):
try:
f.throwExceptionIntoGenerator(g)
except StopIteration:
pass
else:
self.fail("throwExceptionIntoGenerator should have raised "
"StopIteration")
def test_throwExceptionIntoGenerator(self):
"""
It should be possible to throw the exception that a Failure
represents into a generator.
"""
stuff = []
def generator():
try:
yield
except:
stuff.append(sys.exc_info())
else:
self.fail("Yield should have yielded exception.")
g = generator()
f = getDivisionFailure()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(stuff[0][0], ZeroDivisionError)
self.assertIsInstance(stuff[0][1], ZeroDivisionError)
self.assertEqual(traceback.extract_tb(stuff[0][2])[-1][-1], "1/0")
def test_findFailureInGenerator(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by throwExceptionIntoGenerator).
"""
f = getDivisionFailure()
f.cleanFailure()
foundFailures = []
def generator():
try:
yield
except:
foundFailures.append(failure.Failure._findFailure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(foundFailures, [f])
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
throwExceptionIntoGenerator, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
newFailures = []
def generator():
try:
yield
except:
newFailures.append(failure.Failure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(len(newFailures), 1)
self.assertEqual(newFailures[0].getTraceback(), f.getTraceback())
def test_ambiguousFailureInGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} inside the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
try:
yield
except:
[][1]
except:
self.assertIsInstance(failure.Failure().value, IndexError)
g = generator()
next(g)
f = getDivisionFailure()
self._throwIntoGenerator(f, g)
def test_ambiguousFailureFromGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} above the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
yield
except:
[][1]
g = generator()
next(g)
f = getDivisionFailure()
try:
self._throwIntoGenerator(f, g)
except:
self.assertIsInstance(failure.Failure().value, IndexError)
|
def assertDefaultTraceback(self, captureVars=False):
"""
Assert that L{printTraceback} produces and prints a default traceback.
The default traceback consists of a header::
Traceback (most recent call last):
The body with traceback::
File "/twisted/trial/_synctest.py", line 1180, in _run
runWithWarningsSuppressed(suppress, method)
And the footer::
--- <exception caught here> ---
File "twisted/test/test_failure.py", line 39, in getDivisionFailure
1/0
exceptions.ZeroDivisionError: float division
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyzzy'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
f.printTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += ' File "%s", line %s, in %s\n' % (filename, lineno,
method)
stack += ' %s\n' % (linecache.getline(
filename, lineno).strip(),)
self.assertTracebackFormat(tb,
"Traceback (most recent call last):",
"%s\n%s%s: %s\n" % (failure.EXCEPTION_CAUGHT_HERE, stack,
reflect.qual(f.type), reflect.safe_str(f.value)))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*xyzzy', tb))
| 303 | 349 |
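A minimal sketch, assuming Twisted on Python 3, of the call pattern the assertion above exercises: capture a division Failure and print its default traceback into an in-memory buffer. The helper name here is illustrative.

import io
from twisted.python import failure

def get_division_failure():
    # Capture the active ZeroDivisionError as a Failure inside the handler.
    try:
        1 / 0
    except ZeroDivisionError:
        return failure.Failure()

f = get_division_failure()
out = io.StringIO()
f.printTraceback(out)
# The default format begins with the header the assertion above checks for.
print(out.getvalue().splitlines()[0])  # expected: "Traceback (most recent call last):"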
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for the L{twisted.python.failure} module.
"""
from __future__ import division, absolute_import
import re
import sys
import traceback
import pdb
import linecache
from twisted.python.compat import _PY3, NativeStringIO
from twisted.python import reflect
from twisted.python import failure
from twisted.trial.unittest import SkipTest, SynchronousTestCase
try:
from twisted.test import raiser
except ImportError:
raiser = None
def getDivisionFailure(*args, **kwargs):
"""
Make a C{Failure} of a divide-by-zero error.
@param args: Any C{*args} are passed to Failure's constructor.
@param kwargs: Any C{**kwargs} are passed to Failure's constructor.
"""
try:
1/0
except:
f = failure.Failure(*args, **kwargs)
return f
class FailureTests(SynchronousTestCase):
"""
Tests for L{failure.Failure}.
"""
def test_failAndTrap(self):
"""
Trapping a L{Failure}.
"""
try:
raise NotImplementedError('test')
except:
f = failure.Failure()
error = f.trap(SystemExit, RuntimeError)
self.assertEqual(error, RuntimeError)
self.assertEqual(f.type, NotImplementedError)
def test_trapRaisesWrappedException(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises the wrapped
C{Exception}.
"""
if not _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 3.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(ValueError, f.trap, OverflowError)
self.assertIs(exception, untrapped)
def test_trapRaisesSelf(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises itself.
"""
if _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 2.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(failure.Failure, f.trap, OverflowError)
self.assertIs(f, untrapped)
def test_failureValueFromFailure(self):
"""
A L{failure.Failure} constructed from another
L{failure.Failure} instance, has its C{value} property set to
the value of that L{failure.Failure} instance.
"""
exception = ValueError()
f1 = failure.Failure(exception)
f2 = failure.Failure(f1)
self.assertIs(f2.value, exception)
def test_failureValueFromFoundFailure(self):
"""
A L{failure.Failure} constructed without a C{exc_value}
argument, will search for an "original" C{Failure}, and if
found, its value will be used as the value for the new
C{Failure}.
"""
exception = ValueError()
f1 = failure.Failure(exception)
try:
f1.trap(OverflowError)
except:
f2 = failure.Failure()
self.assertIs(f2.value, exception)
def assertStartsWith(self, s, prefix):
"""
Assert that C{s} starts with a particular C{prefix}.
@param s: The input string.
@type s: C{str}
@param prefix: The string that C{s} should start with.
@type prefix: C{str}
"""
self.assertTrue(s.startswith(prefix),
'%r is not the start of %r' % (prefix, s))
def assertEndsWith(self, s, suffix):
"""
Assert that C{s} end with a particular C{suffix}.
@param s: The input string.
@type s: C{str}
@param suffix: The string that C{s} should end with.
@type suffix: C{str}
"""
self.assertTrue(s.endswith(suffix),
'%r is not the end of %r' % (suffix, s))
def assertTracebackFormat(self, tb, prefix, suffix):
"""
Assert that the C{tb} traceback contains a particular C{prefix} and
C{suffix}.
@param tb: The traceback string.
@type tb: C{str}
@param prefix: The string that C{tb} should start with.
@type prefix: C{str}
@param suffix: The string that C{tb} should end with.
@type suffix: C{str}
"""
self.assertStartsWith(tb, prefix)
self.assertEndsWith(tb, suffix)
def assertDetailedTraceback(self, captureVars=False, cleanFailure=False):
"""
Assert that L{printDetailedTraceback} produces and prints a detailed
traceback.
The detailed traceback consists of a header::
*--- Failure #20 ---
The body contains the stacktrace::
/twisted/trial/_synctest.py:1180: _run(...)
/twisted/python/util.py:1076: runWithWarningsSuppressed(...)
--- <exception caught here> ---
/twisted/test/test_failure.py:39: getDivisionFailure(...)
If C{captureVars} is enabled the body also includes a list of
globals and locals::
[ Locals ]
exampleLocalVar : 'xyz'
...
( Globals )
...
Or when C{captureVars} is disabled::
[Capture of Locals and Globals disabled (use captureVars=True)]
When C{cleanFailure} is enabled references to other objects are removed
and replaced with strings.
And finally the footer with the L{Failure}'s value::
exceptions.ZeroDivisionError: float division
*--- End of Failure #20 ---
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
@param cleanFailure: Enables L{Failure.cleanFailure}.
@type cleanFailure: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyz'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
if cleanFailure:
f.cleanFailure()
f.printDetailedTraceback(out)
tb = out.getvalue()
start = "*--- Failure #%d%s---\n" % (f.count,
(f.pickled and ' (pickled) ') or ' ')
end = "%s: %s\n*--- End of Failure #%s ---\n" % (reflect.qual(f.type),
reflect.safe_str(f.value), f.count)
self.assertTracebackFormat(tb, start, end)
# Variables are printed on lines with 2 leading spaces.
linesWithVars = [line for line in tb.splitlines()
if line.startswith(' ')]
if captureVars:
self.assertNotEqual([], linesWithVars)
if cleanFailure:
line = ' exampleLocalVar : "\'xyz\'"'
else:
line = " exampleLocalVar : 'xyz'"
self.assertIn(line, linesWithVars)
else:
self.assertEqual([], linesWithVars)
self.assertIn(' [Capture of Locals and Globals disabled (use '
'captureVars=True)]\n', tb)
def assertBriefTraceback(self, captureVars=False):
"""
Assert that L{printBriefTraceback} produces and prints a brief
traceback.
The brief traceback consists of a header::
Traceback: <type 'exceptions.ZeroDivisionError'>: float division
The body with the stacktrace::
/twisted/trial/_synctest.py:1180:_run
/twisted/python/util.py:1076:runWithWarningsSuppressed
And the footer::
--- <exception caught here> ---
/twisted/test/test_failure.py:39:getDivisionFailure
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'abcde'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure()
out = NativeStringIO()
f.printBriefTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += '%s:%s:%s\n' % (filename, lineno, method)
zde = repr(ZeroDivisionError)
self.assertTracebackFormat(tb,
"Traceback: %s: " % (zde,),
"%s\n%s" % (failure.EXCEPTION_CAUGHT_HERE, stack))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*abcde', tb))
def assertDefaultTraceback(self, captureVars=False):
"""
Assert that L{printTraceback} produces and prints a default traceback.
The default traceback consists of a header::
Traceback (most recent call last):
The body with traceback::
File "/twisted/trial/_synctest.py", line 1180, in _run
runWithWarningsSuppressed(suppress, method)
And the footer::
--- <exception caught here> ---
File "twisted/test/test_failure.py", line 39, in getDivisionFailure
1/0
exceptions.ZeroDivisionError: float division
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyzzy'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
f.printTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += ' File "%s", line %s, in %s\n' % (filename, lineno,
method)
stack += ' %s\n' % (linecache.getline(
filename, lineno).strip(),)
self.assertTracebackFormat(tb,
"Traceback (most recent call last):",
"%s\n%s%s: %s\n" % (failure.EXCEPTION_CAUGHT_HERE, stack,
reflect.qual(f.type), reflect.safe_str(f.value)))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*xyzzy', tb))
def test_printDetailedTraceback(self):
"""
L{printDetailedTraceback} returns a detailed traceback including the
L{Failure}'s count.
"""
self.assertDetailedTraceback()
def test_printBriefTraceback(self):
"""
L{printBriefTraceback} returns a brief traceback.
"""
self.assertBriefTraceback()
def test_printTraceback(self):
"""
L{printTraceback} returns a traceback.
"""
self.assertDefaultTraceback()
def test_printDetailedTracebackCapturedVars(self):
"""
L{printDetailedTraceback} captures the locals and globals for its
stack frames and adds them to the traceback, when called on a
L{Failure} constructed with C{captureVars=True}.
"""
self.assertDetailedTraceback(captureVars=True)
def test_printBriefTracebackCapturedVars(self):
"""
L{printBriefTraceback} returns a brief traceback when called on a
L{Failure} constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertBriefTraceback(captureVars=True)
def test_printTracebackCapturedVars(self):
"""
L{printTraceback} returns a traceback when called on a L{Failure}
constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertDefaultTraceback(captureVars=True)
def test_printDetailedTracebackCapturedVarsCleaned(self):
"""
C{printDetailedTraceback} includes information about local variables on
the stack after C{cleanFailure} has been called.
"""
self.assertDetailedTraceback(captureVars=True, cleanFailure=True)
def test_invalidFormatFramesDetail(self):
"""
L{failure.format_frames} raises a L{ValueError} if the supplied
C{detail} level is unknown.
"""
self.assertRaises(ValueError, failure.format_frames, None, None,
detail='noisia')
def test_ExplictPass(self):
e = RuntimeError()
f = failure.Failure(e)
f.trap(RuntimeError)
self.assertEqual(f.value, e)
def _getInnermostFrameLine(self, f):
try:
f.raiseException()
except ZeroDivisionError:
tb = traceback.extract_tb(sys.exc_info()[2])
return tb[-1][-1]
else:
raise Exception(
"f.raiseException() didn't raise ZeroDivisionError!?")
def test_RaiseExceptionWithTB(self):
f = getDivisionFailure()
innerline = self._getInnermostFrameLine(f)
self.assertEqual(innerline, '1/0')
def test_stringExceptionConstruction(self):
"""
Constructing a C{Failure} with a string as its exception value raises
a C{TypeError}, as this is no longer supported as of Python 2.6.
"""
exc = self.assertRaises(TypeError, failure.Failure, "ono!")
self.assertIn("Strings are not supported by Failure", str(exc))
def test_ConstructionFails(self):
"""
Creating a Failure with no arguments causes it to try to discover the
current interpreter exception state. If no such state exists, creating
the Failure should raise a synchronous exception.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertRaises(failure.NoCurrentExceptionError, failure.Failure)
def test_getTracebackObject(self):
"""
If the C{Failure} has not been cleaned, then C{getTracebackObject}
        returns the traceback object that it captured in its constructor.
"""
f = getDivisionFailure()
self.assertEqual(f.getTracebackObject(), f.tb)
def test_getTracebackObjectFromCaptureVars(self):
"""
C{captureVars=True} has no effect on the result of
C{getTracebackObject}.
"""
try:
1/0
except ZeroDivisionError:
noVarsFailure = failure.Failure()
varsFailure = failure.Failure(captureVars=True)
self.assertEqual(noVarsFailure.getTracebackObject(), varsFailure.tb)
def test_getTracebackObjectFromClean(self):
"""
If the Failure has been cleaned, then C{getTracebackObject} returns an
object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure()
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertIsNotNone(expected)
self.assertEqual(expected, observed)
def test_getTracebackObjectFromCaptureVarsAndClean(self):
"""
If the Failure was created with captureVars, then C{getTracebackObject}
returns an object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure(captureVars=True)
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertEqual(expected, observed)
def test_getTracebackObjectWithoutTraceback(self):
"""
L{failure.Failure}s need not be constructed with traceback objects. If
a C{Failure} has no traceback information at all, C{getTracebackObject}
just returns None.
None is a good value, because traceback.extract_tb(None) -> [].
"""
f = failure.Failure(Exception("some error"))
self.assertIsNone(f.getTracebackObject())
def test_tracebackFromExceptionInPython3(self):
"""
If a L{failure.Failure} is constructed with an exception but no
traceback in Python 3, the traceback will be extracted from the
exception's C{__traceback__} attribute.
"""
try:
1/0
except:
klass, exception, tb = sys.exc_info()
f = failure.Failure(exception)
self.assertIs(f.tb, tb)
def test_cleanFailureRemovesTracebackInPython3(self):
"""
L{failure.Failure.cleanFailure} sets the C{__traceback__} attribute of
the exception to L{None} in Python 3.
"""
f = getDivisionFailure()
self.assertIsNotNone(f.tb)
self.assertIs(f.value.__traceback__, f.tb)
f.cleanFailure()
self.assertIsNone(f.value.__traceback__)
if getattr(BaseException, "__traceback__", None) is None:
test_tracebackFromExceptionInPython3.skip = "Python 3 only."
test_cleanFailureRemovesTracebackInPython3.skip = "Python 3 only."
def test_repr(self):
"""
The C{repr} of a L{failure.Failure} shows the type and string
representation of the underlying exception.
"""
f = getDivisionFailure()
typeName = reflect.fullyQualifiedName(ZeroDivisionError)
self.assertEqual(
repr(f),
'<twisted.python.failure.Failure '
'%s: division by zero>' % (typeName,))
class BrokenStr(Exception):
"""
An exception class the instances of which cannot be presented as strings via
C{str}.
"""
def __str__(self):
# Could raise something else, but there's no point as yet.
raise self
class BrokenExceptionMetaclass(type):
"""
A metaclass for an exception type which cannot be presented as a string via
C{str}.
"""
def __str__(self):
raise ValueError("You cannot make a string out of me.")
class BrokenExceptionType(Exception, object):
"""
    The aforementioned exception type which cannot be presented as a string via
C{str}.
"""
__metaclass__ = BrokenExceptionMetaclass
class GetTracebackTests(SynchronousTestCase):
"""
Tests for L{Failure.getTraceback}.
"""
def _brokenValueTest(self, detail):
"""
Construct a L{Failure} with an exception that raises an exception from
its C{__str__} method and then call C{getTraceback} with the specified
detail and verify that it returns a string.
"""
x = BrokenStr()
f = failure.Failure(x)
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenValueBriefDetail(self):
"""
A L{Failure} might wrap an exception with a C{__str__} method which
raises an exception. In this case, calling C{getTraceback} on the
failure with the C{"brief"} detail does not raise an exception.
"""
self._brokenValueTest("brief")
def test_brokenValueDefaultDetail(self):
"""
Like test_brokenValueBriefDetail, but for the C{"default"} detail case.
"""
self._brokenValueTest("default")
def test_brokenValueVerboseDetail(self):
"""
        Like test_brokenValueBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenValueTest("verbose")
def _brokenTypeTest(self, detail):
"""
Construct a L{Failure} with an exception type that raises an exception
from its C{__str__} method and then call C{getTraceback} with the
specified detail and verify that it returns a string.
"""
f = failure.Failure(BrokenExceptionType())
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenTypeBriefDetail(self):
"""
A L{Failure} might wrap an exception the type object of which has a
C{__str__} method which raises an exception. In this case, calling
C{getTraceback} on the failure with the C{"brief"} detail does not raise
an exception.
"""
self._brokenTypeTest("brief")
def test_brokenTypeDefaultDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"default"} detail case.
"""
self._brokenTypeTest("default")
def test_brokenTypeVerboseDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenTypeTest("verbose")
class FindFailureTests(SynchronousTestCase):
"""
Tests for functionality related to L{Failure._findFailure}.
"""
def test_findNoFailureInExceptionHandler(self):
"""
Within an exception handler, _findFailure should return
L{None} in case no Failure is associated with the current
exception.
"""
try:
1/0
except:
self.assertIsNone(failure.Failure._findFailure())
else:
self.fail("No exception raised from 1/0!?")
def test_findNoFailure(self):
"""
Outside of an exception handler, _findFailure should return None.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertIsNone(sys.exc_info()[-1]) #environment sanity check
self.assertIsNone(failure.Failure._findFailure())
def test_findFailure(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by raiseException).
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
self.assertEqual(failure.Failure._findFailure(), f)
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
raiseException, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
newF = failure.Failure()
self.assertEqual(f.getTraceback(), newF.getTraceback())
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionWithMungedStackSucceeds(self):
"""
Pyrex and Cython are known to insert fake stack frames so as to give
more Python-like tracebacks. These stack frames with empty code objects
should not break extraction of the exception.
"""
try:
raiser.raiseException()
except raiser.RaiserException:
f = failure.Failure()
self.assertTrue(f.check(raiser.RaiserException))
else:
self.fail("No exception raised from extension?!")
if raiser is None:
skipMsg = "raiser extension not available"
test_failureConstructionWithMungedStackSucceeds.skip = skipMsg
# On Python 3.5, extract_tb returns "FrameSummary" objects, which are almost
# like the old tuples. This being different does not affect the actual tests
# as we are testing that the input works, and that extract_tb returns something
# reasonable.
if sys.version_info < (3, 5):
_tb = lambda fn, lineno, name, text: (fn, lineno, name, text)
else:
from traceback import FrameSummary
_tb = lambda fn, lineno, name, text: FrameSummary(fn, lineno, name)
class FormattableTracebackTests(SynchronousTestCase):
"""
Whitebox tests that show that L{failure._Traceback} constructs objects that
can be used by L{traceback.extract_tb}.
If the objects can be used by L{traceback.extract_tb}, then they can be
formatted using L{traceback.format_tb} and friends.
"""
def test_singleFrame(self):
"""
A C{_Traceback} object constructed with a single frame should be able
to be passed to L{traceback.extract_tb}, and we should get a singleton
list containing a (filename, lineno, methodname, line) tuple.
"""
tb = failure._Traceback([['method', 'filename.py', 123, {}, {}]])
# Note that we don't need to test that extract_tb correctly extracts
# the line's contents. In this case, since filename.py doesn't exist,
# it will just use None.
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method', None)])
def test_manyFrames(self):
"""
A C{_Traceback} object constructed with multiple frames should be able
to be passed to L{traceback.extract_tb}, and we should get a list
containing a tuple for each frame.
"""
tb = failure._Traceback([
['method1', 'filename.py', 123, {}, {}],
['method2', 'filename.py', 235, {}, {}]])
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method1', None),
_tb('filename.py', 235, 'method2', None)])
class FrameAttributesTests(SynchronousTestCase):
"""
_Frame objects should possess some basic attributes that qualify them as
fake python Frame objects.
"""
def test_fakeFrameAttributes(self):
"""
L{_Frame} instances have the C{f_globals} and C{f_locals} attributes
bound to C{dict} instance. They also have the C{f_code} attribute
bound to something like a code object.
"""
frame = failure._Frame("dummyname", "dummyfilename")
self.assertIsInstance(frame.f_globals, dict)
self.assertIsInstance(frame.f_locals, dict)
self.assertIsInstance(frame.f_code, failure._Code)
class DebugModeTests(SynchronousTestCase):
"""
Failure's debug mode should allow jumping into the debugger.
"""
def setUp(self):
"""
Override pdb.post_mortem so we can make sure it's called.
"""
# Make sure any changes we make are reversed:
post_mortem = pdb.post_mortem
origInit = failure.Failure.__init__
def restore():
pdb.post_mortem = post_mortem
failure.Failure.__init__ = origInit
self.addCleanup(restore)
self.result = []
pdb.post_mortem = self.result.append
failure.startDebugMode()
def test_regularFailure(self):
"""
If startDebugMode() is called, calling Failure() will first call
pdb.post_mortem with the traceback.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure()
self.assertEqual(self.result, [tb])
self.assertFalse(f.captureVars)
def test_captureVars(self):
"""
If startDebugMode() is called, passing captureVars to Failure() will
not blow up.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure(captureVars=True)
self.assertEqual(self.result, [tb])
self.assertTrue(f.captureVars)
class ExtendedGeneratorTests(SynchronousTestCase):
"""
Tests C{failure.Failure} support for generator features added in Python 2.5
"""
def _throwIntoGenerator(self, f, g):
try:
f.throwExceptionIntoGenerator(g)
except StopIteration:
pass
else:
self.fail("throwExceptionIntoGenerator should have raised "
"StopIteration")
def test_throwExceptionIntoGenerator(self):
"""
It should be possible to throw the exception that a Failure
represents into a generator.
"""
stuff = []
def generator():
try:
yield
except:
stuff.append(sys.exc_info())
else:
self.fail("Yield should have yielded exception.")
g = generator()
f = getDivisionFailure()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(stuff[0][0], ZeroDivisionError)
self.assertIsInstance(stuff[0][1], ZeroDivisionError)
self.assertEqual(traceback.extract_tb(stuff[0][2])[-1][-1], "1/0")
def test_findFailureInGenerator(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by throwExceptionIntoGenerator).
"""
f = getDivisionFailure()
f.cleanFailure()
foundFailures = []
def generator():
try:
yield
except:
foundFailures.append(failure.Failure._findFailure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(foundFailures, [f])
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
throwExceptionIntoGenerator, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
newFailures = []
def generator():
try:
yield
except:
newFailures.append(failure.Failure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(len(newFailures), 1)
self.assertEqual(newFailures[0].getTraceback(), f.getTraceback())
def test_ambiguousFailureInGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} inside the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
try:
yield
except:
[][1]
except:
self.assertIsInstance(failure.Failure().value, IndexError)
g = generator()
next(g)
f = getDivisionFailure()
self._throwIntoGenerator(f, g)
def test_ambiguousFailureFromGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} above the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
yield
except:
[][1]
g = generator()
next(g)
f = getDivisionFailure()
try:
self._throwIntoGenerator(f, g)
except:
self.assertIsInstance(failure.Failure().value, IndexError)
|
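A compact sketch, assuming Twisted is installed, of the Failure behaviours the tests above exercise: constructing a Failure inside an exception handler, trapping an expected type, and rendering the traceback as a string. The exception message is illustrative.

from twisted.python import failure

try:
    raise RuntimeError("boom")
except RuntimeError:
    f = failure.Failure()  # captures the active exception and its traceback

matched = f.trap(SystemExit, RuntimeError)     # returns the expected type that matched
assert matched is RuntimeError
assert f.check(RuntimeError) is RuntimeError   # check() is the non-raising variant
tb_text = f.getTraceback()                     # a plain string, as asserted above
print(tb_text.splitlines()[-1])                # ends with the qualified type and message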
add_event_callback
|
Adds a callback for an event which is called internally by apps.
Args:
name (str): Name of the app.
namespace (str): Namespace of the event.
cb: Callback function.
event (str): Name of the event.
**kwargs: List of values to filter on, and additional arguments to pass to the callback.
Returns:
``None`` or the reference to the callback handle.
|
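A hedged usage sketch of the call described above. The Events instance (events), the app name "my_app", the event name, and the filter value are all illustrative assumptions, and the callback signature follows AppDaemon's usual event-callback convention rather than anything defined in this module.

async def register_motion_callback(events):
    # Hypothetical app-level callback; the exact invocation signature is handled
    # by the threading dispatcher, not by add_event_callback itself.
    async def on_motion(event_name, data, kwargs):
        print("motion event:", event_name, data)

    # Extra keyword arguments are stored with the callback and later matched
    # against the event's data as filters; "oneshot" removes the callback after
    # its first successful dispatch (see process_event_callbacks below).
    handle = await events.add_event_callback(
        "my_app",                        # name of a registered app (assumed)
        "default",                       # namespace
        on_motion,                       # callback
        "motion_detected",               # event name (assumed)
        entity_id="binary_sensor.hall",  # filter on event data (assumed)
        oneshot=True,
    )
    return handle  # None if the pin constraints were not valid for this app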
"""Module to handle all events within AppDaemon."""
import uuid
from copy import deepcopy
import traceback
import datetime
from appdaemon.appdaemon import AppDaemon
import appdaemon.utils as utils
class Events:
"""Encapsulate event handling."""
def __init__(self, ad: AppDaemon):
"""Constructor.
Args:
ad: Reference to the AppDaemon object
"""
self.AD = ad
self.logger = ad.logging.get_child("_events")
#
# Events
#
# MASKED: add_event_callback function (lines 28-95)
async def cancel_event_callback(self, name, handle):
"""Cancels an event callback.
Args:
name (str): Name of the app or module.
handle: Previously supplied callback handle for the callback.
Returns:
None.
"""
executed = False
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
del self.AD.callbacks.callbacks[name][handle]
await self.AD.state.remove_entity("admin", "event_callback.{}".format(handle))
executed = True
if name in self.AD.callbacks.callbacks and self.AD.callbacks.callbacks[name] == {}:
del self.AD.callbacks.callbacks[name]
if not executed:
self.logger.warning(
"Invalid callback handle '{}' in cancel_event_callback() from app {}".format(handle, name)
)
return executed
async def info_event_callback(self, name, handle):
"""Gets the information of an event callback.
Args:
name (str): Name of the app or subsystem.
handle: Previously supplied handle for the callback.
Returns:
        A tuple of the event name and the callback's kwargs, or raises a ``ValueError`` if an invalid handle is provided.
"""
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][handle]
return callback["event"], callback["kwargs"].copy()
else:
raise ValueError("Invalid handle: {}".format(handle))
async def fire_event(self, namespace, event, **kwargs):
"""Fires an event.
If the namespace does not have a plugin associated with it, the event will be fired locally.
If a plugin is associated, the firing of the event will be delegated to the plugin, under the
understanding that when the event is fired, the plugin will notify appdaemon that it occurred,
usually via the system the plugin is communicating with.
Args:
namespace (str): Namespace for the event to be fired in.
event (str): Name of the event.
**kwargs: Arguments to associate with the event.
Returns:
None.
"""
self.logger.debug("fire_plugin_event() %s %s %s", namespace, event, kwargs)
plugin = await self.AD.plugins.get_plugin_object(namespace)
if hasattr(plugin, "fire_plugin_event"):
# We assume that the event will come back to us via the plugin
await plugin.fire_plugin_event(event, namespace, **kwargs)
else:
# Just fire the event locally
await self.AD.events.process_event(namespace, {"event_type": event, "data": kwargs})
async def process_event(self, namespace, data):
"""Processes an event that has been received either locally or from a plugin.
Args:
namespace (str): Namespace the event was fired in.
data: Data associated with the event.
Returns:
None.
"""
try:
# if data["event_type"] == "__AD_ENTITY_REMOVED":
# print("process event")
self.logger.debug("Event type:%s:", data["event_type"])
self.logger.debug(data["data"])
            # Kick the scheduler so it updates its clock
if self.AD.sched is not None and self.AD.sched.realtime is False and namespace != "admin":
await self.AD.sched.kick()
if data["event_type"] == "state_changed":
if "entity_id" in data["data"] and "new_state" in data["data"]:
if data["data"]["new_state"] is None:
# most likely it is a deleted entity
return
entity_id = data["data"]["entity_id"]
self.AD.state.set_state_simple(namespace, entity_id, data["data"]["new_state"])
if self.AD.apps is True and namespace != "admin":
await self.AD.state.process_state_callbacks(namespace, data)
else:
self.logger.warning("Malformed 'state_changed' event: %s", data["data"])
return
# Check for log callbacks and exit to prevent loops
if data["event_type"] == "__AD_LOG_EVENT":
if await self.has_log_callback(data["data"]["app_name"]):
self.logger.debug("Discarding event for loop avoidance")
return
await self.AD.logging.process_log_callbacks(namespace, data)
if self.AD.apps is True: # and namespace != "admin":
# Process callbacks
await self.process_event_callbacks(namespace, data)
#
# Send to the stream
#
if self.AD.http is not None:
if data["event_type"] == "state_changed":
if data["data"]["new_state"] == data["data"]["old_state"]:
# Nothing changed so don't send
return
                    # take a copy without ts if present, as it breaks deepcopy and JSON serialization
if "ts" in data["data"]:
ts = data["data"].pop("ts")
mydata = deepcopy(data)
data["data"]["ts"] = ts
else:
mydata = deepcopy(data)
await self.AD.http.stream_update(namespace, mydata)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error during process_event()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
async def has_log_callback(self, name):
"""Returns ``True`` if the app has a log callback, ``False`` otherwise.
Used to prevent callback loops. In the calling logic, if this function returns
``True`` the resulting logging event will be suppressed.
Args:
name (str): Name of the app.
"""
has_log_callback = False
if name == "AppDaemon._stream":
has_log_callback = True
else:
async with self.AD.callbacks.callbacks_lock:
for callback in self.AD.callbacks.callbacks:
for _uuid in self.AD.callbacks.callbacks[callback]:
cb = self.AD.callbacks.callbacks[callback][_uuid]
if cb["name"] == name and cb["type"] == "event" and cb["event"] == "__AD_LOG_EVENT":
has_log_callback = True
elif cb["name"] == name and cb["type"] == "log":
has_log_callback = True
return has_log_callback
async def process_event_callbacks(self, namespace, data):
"""Processes a pure event callback.
Locate any callbacks that may be registered for this event, check for filters and if appropriate,
dispatch the event for further checking and eventual action.
Args:
namespace (str): Namespace of the event.
data: Data associated with the event.
Returns:
None.
"""
self.logger.debug("process_event_callbacks() %s %s", namespace, data)
removes = []
async with self.AD.callbacks.callbacks_lock:
for name in self.AD.callbacks.callbacks.keys():
for uuid_ in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][uuid_]
if callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global":
#
# Check for either a blank event (for all events)
# Or the event is a match
# But don't allow a global listen for any system events (events that start with __)
#
if "event" in callback and (
(callback["event"] is None and data["event_type"][:2] != "__")
or data["event_type"] == callback["event"]
):
# Check any filters
_run = True
for key in callback["kwargs"]:
if key in data["data"] and callback["kwargs"][key] != data["data"][key]:
_run = False
if data["event_type"] == "__AD_LOG_EVENT":
if (
"log" in callback["kwargs"]
and callback["kwargs"]["log"] != data["data"]["log_type"]
):
_run = False
if _run:
if name in self.AD.app_management.objects:
executed = await self.AD.threading.dispatch_worker(
name,
{
"id": uuid_,
"name": name,
"objectid": self.AD.app_management.objects[name]["id"],
"type": "event",
"event": data["event_type"],
"function": callback["function"],
"data": data["data"],
"pin_app": callback["pin_app"],
"pin_thread": callback["pin_thread"],
"kwargs": callback["kwargs"],
},
)
# Remove the callback if appropriate
if executed is True:
remove = callback["kwargs"].get("oneshot", False)
if remove is True:
removes.append({"name": name, "uuid": uuid_})
for remove in removes:
await self.cancel_event_callback(remove["name"], remove["uuid"])
async def event_services(self, namespace, domain, service, kwargs):
if "event" in kwargs:
event = kwargs["event"]
del kwargs["event"]
await self.fire_event(namespace, event, **kwargs)
else:
self.logger.warning("Malformed 'fire_event' service call, as no event given")
@staticmethod
def sanitize_event_kwargs(app, kwargs):
kwargs_copy = kwargs.copy()
return utils._sanitize_kwargs(kwargs_copy, ["__silent"])
|
async def add_event_callback(self, name, namespace, cb, event, **kwargs):
"""Adds a callback for an event which is called internally by apps.
Args:
name (str): Name of the app.
namespace (str): Namespace of the event.
cb: Callback function.
event (str): Name of the event.
**kwargs: List of values to filter on, and additional arguments to pass to the callback.
Returns:
``None`` or the reference to the callback handle.
"""
if self.AD.threading.validate_pin(name, kwargs) is True:
if "pin" in kwargs:
pin_app = kwargs["pin_app"]
else:
pin_app = self.AD.app_management.objects[name]["pin_app"]
if "pin_thread" in kwargs:
pin_thread = kwargs["pin_thread"]
pin_app = True
else:
pin_thread = self.AD.app_management.objects[name]["pin_thread"]
async with self.AD.callbacks.callbacks_lock:
if name not in self.AD.callbacks.callbacks:
self.AD.callbacks.callbacks[name] = {}
handle = uuid.uuid4().hex
self.AD.callbacks.callbacks[name][handle] = {
"name": name,
"id": self.AD.app_management.objects[name]["id"],
"type": "event",
"function": cb,
"namespace": namespace,
"event": event,
"pin_app": pin_app,
"pin_thread": pin_thread,
"kwargs": kwargs,
}
if "timeout" in kwargs:
exec_time = await self.AD.sched.get_now() + datetime.timedelta(seconds=int(kwargs["timeout"]))
kwargs["__timeout"] = await self.AD.sched.insert_schedule(
name, exec_time, None, False, None, __event_handle=handle,
)
await self.AD.state.add_entity(
"admin",
"event_callback.{}".format(handle),
"active",
{
"app": name,
"event_name": event,
"function": cb.__name__,
"pinned": pin_app,
"pinned_thread": pin_thread,
"fired": 0,
"executed": 0,
"kwargs": kwargs,
},
)
return handle
else:
return None
| 28 | 95 |
"""Module to handle all events within AppDaemon."""
import uuid
from copy import deepcopy
import traceback
import datetime
from appdaemon.appdaemon import AppDaemon
import appdaemon.utils as utils
class Events:
"""Encapsulate event handling."""
def __init__(self, ad: AppDaemon):
"""Constructor.
Args:
ad: Reference to the AppDaemon object
"""
self.AD = ad
self.logger = ad.logging.get_child("_events")
#
# Events
#
async def add_event_callback(self, name, namespace, cb, event, **kwargs):
"""Adds a callback for an event which is called internally by apps.
Args:
name (str): Name of the app.
namespace (str): Namespace of the event.
cb: Callback function.
event (str): Name of the event.
**kwargs: List of values to filter on, and additional arguments to pass to the callback.
Returns:
``None`` or the reference to the callback handle.
"""
if self.AD.threading.validate_pin(name, kwargs) is True:
if "pin" in kwargs:
pin_app = kwargs["pin_app"]
else:
pin_app = self.AD.app_management.objects[name]["pin_app"]
if "pin_thread" in kwargs:
pin_thread = kwargs["pin_thread"]
pin_app = True
else:
pin_thread = self.AD.app_management.objects[name]["pin_thread"]
async with self.AD.callbacks.callbacks_lock:
if name not in self.AD.callbacks.callbacks:
self.AD.callbacks.callbacks[name] = {}
handle = uuid.uuid4().hex
self.AD.callbacks.callbacks[name][handle] = {
"name": name,
"id": self.AD.app_management.objects[name]["id"],
"type": "event",
"function": cb,
"namespace": namespace,
"event": event,
"pin_app": pin_app,
"pin_thread": pin_thread,
"kwargs": kwargs,
}
if "timeout" in kwargs:
exec_time = await self.AD.sched.get_now() + datetime.timedelta(seconds=int(kwargs["timeout"]))
kwargs["__timeout"] = await self.AD.sched.insert_schedule(
name, exec_time, None, False, None, __event_handle=handle,
)
await self.AD.state.add_entity(
"admin",
"event_callback.{}".format(handle),
"active",
{
"app": name,
"event_name": event,
"function": cb.__name__,
"pinned": pin_app,
"pinned_thread": pin_thread,
"fired": 0,
"executed": 0,
"kwargs": kwargs,
},
)
return handle
else:
return None
async def cancel_event_callback(self, name, handle):
"""Cancels an event callback.
Args:
name (str): Name of the app or module.
handle: Previously supplied callback handle for the callback.
Returns:
None.
"""
executed = False
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
del self.AD.callbacks.callbacks[name][handle]
await self.AD.state.remove_entity("admin", "event_callback.{}".format(handle))
executed = True
if name in self.AD.callbacks.callbacks and self.AD.callbacks.callbacks[name] == {}:
del self.AD.callbacks.callbacks[name]
if not executed:
self.logger.warning(
"Invalid callback handle '{}' in cancel_event_callback() from app {}".format(handle, name)
)
return executed
async def info_event_callback(self, name, handle):
"""Gets the information of an event callback.
Args:
name (str): Name of the app or subsystem.
handle: Previously supplied handle for the callback.
Returns:
        A tuple of the event name and the callback's kwargs, or raises a ``ValueError`` if an invalid handle is provided.
"""
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][handle]
return callback["event"], callback["kwargs"].copy()
else:
raise ValueError("Invalid handle: {}".format(handle))
async def fire_event(self, namespace, event, **kwargs):
"""Fires an event.
If the namespace does not have a plugin associated with it, the event will be fired locally.
If a plugin is associated, the firing of the event will be delegated to the plugin, under the
understanding that when the event is fired, the plugin will notify appdaemon that it occurred,
usually via the system the plugin is communicating with.
Args:
namespace (str): Namespace for the event to be fired in.
event (str): Name of the event.
**kwargs: Arguments to associate with the event.
Returns:
None.
"""
self.logger.debug("fire_plugin_event() %s %s %s", namespace, event, kwargs)
plugin = await self.AD.plugins.get_plugin_object(namespace)
if hasattr(plugin, "fire_plugin_event"):
# We assume that the event will come back to us via the plugin
await plugin.fire_plugin_event(event, namespace, **kwargs)
else:
# Just fire the event locally
await self.AD.events.process_event(namespace, {"event_type": event, "data": kwargs})
async def process_event(self, namespace, data):
"""Processes an event that has been received either locally or from a plugin.
Args:
namespace (str): Namespace the event was fired in.
data: Data associated with the event.
Returns:
None.
"""
try:
# if data["event_type"] == "__AD_ENTITY_REMOVED":
# print("process event")
self.logger.debug("Event type:%s:", data["event_type"])
self.logger.debug(data["data"])
            # Kick the scheduler so it updates its clock
if self.AD.sched is not None and self.AD.sched.realtime is False and namespace != "admin":
await self.AD.sched.kick()
if data["event_type"] == "state_changed":
if "entity_id" in data["data"] and "new_state" in data["data"]:
if data["data"]["new_state"] is None:
# most likely it is a deleted entity
return
entity_id = data["data"]["entity_id"]
self.AD.state.set_state_simple(namespace, entity_id, data["data"]["new_state"])
if self.AD.apps is True and namespace != "admin":
await self.AD.state.process_state_callbacks(namespace, data)
else:
self.logger.warning("Malformed 'state_changed' event: %s", data["data"])
return
# Check for log callbacks and exit to prevent loops
if data["event_type"] == "__AD_LOG_EVENT":
if await self.has_log_callback(data["data"]["app_name"]):
self.logger.debug("Discarding event for loop avoidance")
return
await self.AD.logging.process_log_callbacks(namespace, data)
if self.AD.apps is True: # and namespace != "admin":
# Process callbacks
await self.process_event_callbacks(namespace, data)
#
# Send to the stream
#
if self.AD.http is not None:
if data["event_type"] == "state_changed":
if data["data"]["new_state"] == data["data"]["old_state"]:
# Nothing changed so don't send
return
                    # take a copy without ts if present, as it breaks deepcopy and JSON serialization
if "ts" in data["data"]:
ts = data["data"].pop("ts")
mydata = deepcopy(data)
data["data"]["ts"] = ts
else:
mydata = deepcopy(data)
await self.AD.http.stream_update(namespace, mydata)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error during process_event()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
async def has_log_callback(self, name):
"""Returns ``True`` if the app has a log callback, ``False`` otherwise.
Used to prevent callback loops. In the calling logic, if this function returns
``True`` the resulting logging event will be suppressed.
Args:
name (str): Name of the app.
"""
has_log_callback = False
if name == "AppDaemon._stream":
has_log_callback = True
else:
async with self.AD.callbacks.callbacks_lock:
for callback in self.AD.callbacks.callbacks:
for _uuid in self.AD.callbacks.callbacks[callback]:
cb = self.AD.callbacks.callbacks[callback][_uuid]
if cb["name"] == name and cb["type"] == "event" and cb["event"] == "__AD_LOG_EVENT":
has_log_callback = True
elif cb["name"] == name and cb["type"] == "log":
has_log_callback = True
return has_log_callback
async def process_event_callbacks(self, namespace, data):
"""Processes a pure event callback.
Locate any callbacks that may be registered for this event, check for filters and if appropriate,
dispatch the event for further checking and eventual action.
Args:
namespace (str): Namespace of the event.
data: Data associated with the event.
Returns:
None.
"""
self.logger.debug("process_event_callbacks() %s %s", namespace, data)
removes = []
async with self.AD.callbacks.callbacks_lock:
for name in self.AD.callbacks.callbacks.keys():
for uuid_ in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][uuid_]
if callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global":
#
# Check for either a blank event (for all events)
# Or the event is a match
# But don't allow a global listen for any system events (events that start with __)
#
if "event" in callback and (
(callback["event"] is None and data["event_type"][:2] != "__")
or data["event_type"] == callback["event"]
):
# Check any filters
_run = True
for key in callback["kwargs"]:
if key in data["data"] and callback["kwargs"][key] != data["data"][key]:
_run = False
if data["event_type"] == "__AD_LOG_EVENT":
if (
"log" in callback["kwargs"]
and callback["kwargs"]["log"] != data["data"]["log_type"]
):
_run = False
if _run:
if name in self.AD.app_management.objects:
executed = await self.AD.threading.dispatch_worker(
name,
{
"id": uuid_,
"name": name,
"objectid": self.AD.app_management.objects[name]["id"],
"type": "event",
"event": data["event_type"],
"function": callback["function"],
"data": data["data"],
"pin_app": callback["pin_app"],
"pin_thread": callback["pin_thread"],
"kwargs": callback["kwargs"],
},
)
# Remove the callback if appropriate
if executed is True:
remove = callback["kwargs"].get("oneshot", False)
if remove is True:
removes.append({"name": name, "uuid": uuid_})
for remove in removes:
await self.cancel_event_callback(remove["name"], remove["uuid"])
async def event_services(self, namespace, domain, service, kwargs):
if "event" in kwargs:
event = kwargs["event"]
del kwargs["event"]
await self.fire_event(namespace, event, **kwargs)
else:
self.logger.warning("Malformed 'fire_event' service call, as no event given")
@staticmethod
def sanitize_event_kwargs(app, kwargs):
kwargs_copy = kwargs.copy()
return utils._sanitize_kwargs(kwargs_copy, ["__silent"])
|
fire_event
|
Fires an event.
If the namespace does not have a plugin associated with it, the event will be fired locally.
If a plugin is associated, the firing of the event will be delegated to the plugin, under the
understanding that when the event is fired, the plugin will notify appdaemon that it occurred,
usually via the system the plugin is communicating with.
Args:
namespace (str): Namespace for the event to be fired in.
event (str): Name of the event.
**kwargs: Arguments to associate with the event.
Returns:
None.
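A minimal usage sketch, assuming ``ad`` is an initialised AppDaemon instance whose namespaces and plugins are already set up; the "default" namespace and the event payload below are made-up illustrations:
async def fire_mode_change(ad):
    # Delegates to the plugin owning the "default" namespace if it implements
    # fire_plugin_event(); otherwise process_event() handles the event locally.
    await ad.events.fire_event("default", "MODE_CHANGE", mode="night")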
|
"""Module to handle all events within AppDaemon."""
import uuid
from copy import deepcopy
import traceback
import datetime
from appdaemon.appdaemon import AppDaemon
import appdaemon.utils as utils
class Events:
"""Encapsulate event handling."""
def __init__(self, ad: AppDaemon):
"""Constructor.
Args:
ad: Reference to the AppDaemon object
"""
self.AD = ad
self.logger = ad.logging.get_child("_events")
#
# Events
#
async def add_event_callback(self, name, namespace, cb, event, **kwargs):
"""Adds a callback for an event which is called internally by apps.
Args:
name (str): Name of the app.
namespace (str): Namespace of the event.
cb: Callback function.
event (str): Name of the event.
**kwargs: List of values to filter on, and additional arguments to pass to the callback.
Returns:
``None`` or the reference to the callback handle.
"""
if self.AD.threading.validate_pin(name, kwargs) is True:
if "pin" in kwargs:
pin_app = kwargs["pin_app"]
else:
pin_app = self.AD.app_management.objects[name]["pin_app"]
if "pin_thread" in kwargs:
pin_thread = kwargs["pin_thread"]
pin_app = True
else:
pin_thread = self.AD.app_management.objects[name]["pin_thread"]
async with self.AD.callbacks.callbacks_lock:
if name not in self.AD.callbacks.callbacks:
self.AD.callbacks.callbacks[name] = {}
handle = uuid.uuid4().hex
self.AD.callbacks.callbacks[name][handle] = {
"name": name,
"id": self.AD.app_management.objects[name]["id"],
"type": "event",
"function": cb,
"namespace": namespace,
"event": event,
"pin_app": pin_app,
"pin_thread": pin_thread,
"kwargs": kwargs,
}
if "timeout" in kwargs:
exec_time = await self.AD.sched.get_now() + datetime.timedelta(seconds=int(kwargs["timeout"]))
kwargs["__timeout"] = await self.AD.sched.insert_schedule(
name, exec_time, None, False, None, __event_handle=handle,
)
await self.AD.state.add_entity(
"admin",
"event_callback.{}".format(handle),
"active",
{
"app": name,
"event_name": event,
"function": cb.__name__,
"pinned": pin_app,
"pinned_thread": pin_thread,
"fired": 0,
"executed": 0,
"kwargs": kwargs,
},
)
return handle
else:
return None
async def cancel_event_callback(self, name, handle):
"""Cancels an event callback.
Args:
name (str): Name of the app or module.
handle: Previously supplied callback handle for the callback.
Returns:
None.
"""
executed = False
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
del self.AD.callbacks.callbacks[name][handle]
await self.AD.state.remove_entity("admin", "event_callback.{}".format(handle))
executed = True
if name in self.AD.callbacks.callbacks and self.AD.callbacks.callbacks[name] == {}:
del self.AD.callbacks.callbacks[name]
if not executed:
self.logger.warning(
"Invalid callback handle '{}' in cancel_event_callback() from app {}".format(handle, name)
)
return executed
async def info_event_callback(self, name, handle):
"""Gets the information of an event callback.
Args:
name (str): Name of the app or subsystem.
handle: Previously supplied handle for the callback.
Returns:
The event and a copy of the callback's kwargs, or raises a ``ValueError`` if an invalid handle is provided.
"""
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][handle]
return callback["event"], callback["kwargs"].copy()
else:
raise ValueError("Invalid handle: {}".format(handle))
# MASKED: fire_event function (lines 146-172)
async def process_event(self, namespace, data):
"""Processes an event that has been received either locally or from a plugin.
Args:
namespace (str): Namespace the event was fired in.
data: Data associated with the event.
Returns:
None.
"""
try:
# if data["event_type"] == "__AD_ENTITY_REMOVED":
# print("process event")
self.logger.debug("Event type:%s:", data["event_type"])
self.logger.debug(data["data"])
# Kick the scheduler so it updates its clock
if self.AD.sched is not None and self.AD.sched.realtime is False and namespace != "admin":
await self.AD.sched.kick()
if data["event_type"] == "state_changed":
if "entity_id" in data["data"] and "new_state" in data["data"]:
if data["data"]["new_state"] is None:
# most likely it is a deleted entity
return
entity_id = data["data"]["entity_id"]
self.AD.state.set_state_simple(namespace, entity_id, data["data"]["new_state"])
if self.AD.apps is True and namespace != "admin":
await self.AD.state.process_state_callbacks(namespace, data)
else:
self.logger.warning("Malformed 'state_changed' event: %s", data["data"])
return
# Check for log callbacks and exit to prevent loops
if data["event_type"] == "__AD_LOG_EVENT":
if await self.has_log_callback(data["data"]["app_name"]):
self.logger.debug("Discarding event for loop avoidance")
return
await self.AD.logging.process_log_callbacks(namespace, data)
if self.AD.apps is True: # and namespace != "admin":
# Process callbacks
await self.process_event_callbacks(namespace, data)
#
# Send to the stream
#
if self.AD.http is not None:
if data["event_type"] == "state_changed":
if data["data"]["new_state"] == data["data"]["old_state"]:
# Nothing changed so don't send
return
# take a copy without "ts" if present, as it breaks deepcopy and JSON serialization
if "ts" in data["data"]:
ts = data["data"].pop("ts")
mydata = deepcopy(data)
data["data"]["ts"] = ts
else:
mydata = deepcopy(data)
await self.AD.http.stream_update(namespace, mydata)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error during process_event()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
async def has_log_callback(self, name):
"""Returns ``True`` if the app has a log callback, ``False`` otherwise.
Used to prevent callback loops. In the calling logic, if this function returns
``True`` the resulting logging event will be suppressed.
Args:
name (str): Name of the app.
"""
has_log_callback = False
if name == "AppDaemon._stream":
has_log_callback = True
else:
async with self.AD.callbacks.callbacks_lock:
for callback in self.AD.callbacks.callbacks:
for _uuid in self.AD.callbacks.callbacks[callback]:
cb = self.AD.callbacks.callbacks[callback][_uuid]
if cb["name"] == name and cb["type"] == "event" and cb["event"] == "__AD_LOG_EVENT":
has_log_callback = True
elif cb["name"] == name and cb["type"] == "log":
has_log_callback = True
return has_log_callback
async def process_event_callbacks(self, namespace, data):
"""Processes a pure event callback.
Locate any callbacks that may be registered for this event, check for filters and if appropriate,
dispatch the event for further checking and eventual action.
Args:
namespace (str): Namespace of the event.
data: Data associated with the event.
Returns:
None.
"""
self.logger.debug("process_event_callbacks() %s %s", namespace, data)
removes = []
async with self.AD.callbacks.callbacks_lock:
for name in self.AD.callbacks.callbacks.keys():
for uuid_ in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][uuid_]
if callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global":
#
# Check for either a blank event (for all events)
# Or the event is a match
# But don't allow a global listen for any system events (events that start with __)
#
if "event" in callback and (
(callback["event"] is None and data["event_type"][:2] != "__")
or data["event_type"] == callback["event"]
):
# Check any filters
_run = True
for key in callback["kwargs"]:
if key in data["data"] and callback["kwargs"][key] != data["data"][key]:
_run = False
if data["event_type"] == "__AD_LOG_EVENT":
if (
"log" in callback["kwargs"]
and callback["kwargs"]["log"] != data["data"]["log_type"]
):
_run = False
if _run:
if name in self.AD.app_management.objects:
executed = await self.AD.threading.dispatch_worker(
name,
{
"id": uuid_,
"name": name,
"objectid": self.AD.app_management.objects[name]["id"],
"type": "event",
"event": data["event_type"],
"function": callback["function"],
"data": data["data"],
"pin_app": callback["pin_app"],
"pin_thread": callback["pin_thread"],
"kwargs": callback["kwargs"],
},
)
# Remove the callback if appropriate
if executed is True:
remove = callback["kwargs"].get("oneshot", False)
if remove is True:
removes.append({"name": name, "uuid": uuid_})
for remove in removes:
await self.cancel_event_callback(remove["name"], remove["uuid"])
async def event_services(self, namespace, domain, service, kwargs):
if "event" in kwargs:
event = kwargs["event"]
del kwargs["event"]
await self.fire_event(namespace, event, **kwargs)
else:
self.logger.warning("Malformed 'fire_event' service call, as no event given")
@staticmethod
def sanitize_event_kwargs(app, kwargs):
kwargs_copy = kwargs.copy()
return utils._sanitize_kwargs(kwargs_copy, ["__silent"])
|
async def fire_event(self, namespace, event, **kwargs):
"""Fires an event.
If the namespace does not have a plugin associated with it, the event will be fired locally.
If a plugin is associated, the firing of the event will be delegated to the plugin, under the
understanding that when the event is fired, the plugin will notify appdaemon that it occurred,
usually via the system the plugin is communicating with.
Args:
namespace (str): Namespace for the event to be fired in.
event (str): Name of the event.
**kwargs: Arguments to associate with the event.
Returns:
None.
"""
self.logger.debug("fire_plugin_event() %s %s %s", namespace, event, kwargs)
plugin = await self.AD.plugins.get_plugin_object(namespace)
if hasattr(plugin, "fire_plugin_event"):
# We assume that the event will come back to us via the plugin
await plugin.fire_plugin_event(event, namespace, **kwargs)
else:
# Just fire the event locally
await self.AD.events.process_event(namespace, {"event_type": event, "data": kwargs})
| 146 | 172 |
"""Module to handle all events within AppDaemon."""
import uuid
from copy import deepcopy
import traceback
import datetime
from appdaemon.appdaemon import AppDaemon
import appdaemon.utils as utils
class Events:
"""Encapsulate event handling."""
def __init__(self, ad: AppDaemon):
"""Constructor.
Args:
ad: Reference to the AppDaemon object
"""
self.AD = ad
self.logger = ad.logging.get_child("_events")
#
# Events
#
async def add_event_callback(self, name, namespace, cb, event, **kwargs):
"""Adds a callback for an event which is called internally by apps.
Args:
name (str): Name of the app.
namespace (str): Namespace of the event.
cb: Callback function.
event (str): Name of the event.
**kwargs: List of values to filter on, and additional arguments to pass to the callback.
Returns:
``None`` or the reference to the callback handle.
"""
if self.AD.threading.validate_pin(name, kwargs) is True:
if "pin" in kwargs:
pin_app = kwargs["pin_app"]
else:
pin_app = self.AD.app_management.objects[name]["pin_app"]
if "pin_thread" in kwargs:
pin_thread = kwargs["pin_thread"]
pin_app = True
else:
pin_thread = self.AD.app_management.objects[name]["pin_thread"]
async with self.AD.callbacks.callbacks_lock:
if name not in self.AD.callbacks.callbacks:
self.AD.callbacks.callbacks[name] = {}
handle = uuid.uuid4().hex
self.AD.callbacks.callbacks[name][handle] = {
"name": name,
"id": self.AD.app_management.objects[name]["id"],
"type": "event",
"function": cb,
"namespace": namespace,
"event": event,
"pin_app": pin_app,
"pin_thread": pin_thread,
"kwargs": kwargs,
}
if "timeout" in kwargs:
exec_time = await self.AD.sched.get_now() + datetime.timedelta(seconds=int(kwargs["timeout"]))
kwargs["__timeout"] = await self.AD.sched.insert_schedule(
name, exec_time, None, False, None, __event_handle=handle,
)
await self.AD.state.add_entity(
"admin",
"event_callback.{}".format(handle),
"active",
{
"app": name,
"event_name": event,
"function": cb.__name__,
"pinned": pin_app,
"pinned_thread": pin_thread,
"fired": 0,
"executed": 0,
"kwargs": kwargs,
},
)
return handle
else:
return None
async def cancel_event_callback(self, name, handle):
"""Cancels an event callback.
Args:
name (str): Name of the app or module.
handle: Previously supplied callback handle for the callback.
Returns:
None.
"""
executed = False
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
del self.AD.callbacks.callbacks[name][handle]
await self.AD.state.remove_entity("admin", "event_callback.{}".format(handle))
executed = True
if name in self.AD.callbacks.callbacks and self.AD.callbacks.callbacks[name] == {}:
del self.AD.callbacks.callbacks[name]
if not executed:
self.logger.warning(
"Invalid callback handle '{}' in cancel_event_callback() from app {}".format(handle, name)
)
return executed
async def info_event_callback(self, name, handle):
"""Gets the information of an event callback.
Args:
name (str): Name of the app or subsystem.
handle: Previously supplied handle for the callback.
Returns:
The event and a copy of the callback's kwargs, or raises a ``ValueError`` if an invalid handle is provided.
"""
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][handle]
return callback["event"], callback["kwargs"].copy()
else:
raise ValueError("Invalid handle: {}".format(handle))
async def fire_event(self, namespace, event, **kwargs):
"""Fires an event.
If the namespace does not have a plugin associated with it, the event will be fired locally.
If a plugin is associated, the firing of the event will be delegated to the plugin, under the
understanding that when the event is fired, the plugin will notify appdaemon that it occurred,
usually via the system the plugin is communicating with.
Args:
namespace (str): Namespace for the event to be fired in.
event (str): Name of the event.
**kwargs: Arguments to associate with the event.
Returns:
None.
"""
self.logger.debug("fire_plugin_event() %s %s %s", namespace, event, kwargs)
plugin = await self.AD.plugins.get_plugin_object(namespace)
if hasattr(plugin, "fire_plugin_event"):
# We assume that the event will come back to us via the plugin
await plugin.fire_plugin_event(event, namespace, **kwargs)
else:
# Just fire the event locally
await self.AD.events.process_event(namespace, {"event_type": event, "data": kwargs})
async def process_event(self, namespace, data):
"""Processes an event that has been received either locally or from a plugin.
Args:
namespace (str): Namespace the event was fired in.
data: Data associated with the event.
Returns:
None.
"""
try:
# if data["event_type"] == "__AD_ENTITY_REMOVED":
# print("process event")
self.logger.debug("Event type:%s:", data["event_type"])
self.logger.debug(data["data"])
# Kick the scheduler so it updates its clock
if self.AD.sched is not None and self.AD.sched.realtime is False and namespace != "admin":
await self.AD.sched.kick()
if data["event_type"] == "state_changed":
if "entity_id" in data["data"] and "new_state" in data["data"]:
if data["data"]["new_state"] is None:
# most likely it is a deleted entity
return
entity_id = data["data"]["entity_id"]
self.AD.state.set_state_simple(namespace, entity_id, data["data"]["new_state"])
if self.AD.apps is True and namespace != "admin":
await self.AD.state.process_state_callbacks(namespace, data)
else:
self.logger.warning("Malformed 'state_changed' event: %s", data["data"])
return
# Check for log callbacks and exit to prevent loops
if data["event_type"] == "__AD_LOG_EVENT":
if await self.has_log_callback(data["data"]["app_name"]):
self.logger.debug("Discarding event for loop avoidance")
return
await self.AD.logging.process_log_callbacks(namespace, data)
if self.AD.apps is True: # and namespace != "admin":
# Process callbacks
await self.process_event_callbacks(namespace, data)
#
# Send to the stream
#
if self.AD.http is not None:
if data["event_type"] == "state_changed":
if data["data"]["new_state"] == data["data"]["old_state"]:
# Nothing changed so don't send
return
# take a copy without "ts" if present, as it breaks deepcopy and JSON serialization
if "ts" in data["data"]:
ts = data["data"].pop("ts")
mydata = deepcopy(data)
data["data"]["ts"] = ts
else:
mydata = deepcopy(data)
await self.AD.http.stream_update(namespace, mydata)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error during process_event()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
async def has_log_callback(self, name):
"""Returns ``True`` if the app has a log callback, ``False`` otherwise.
Used to prevent callback loops. In the calling logic, if this function returns
``True`` the resulting logging event will be suppressed.
Args:
name (str): Name of the app.
"""
has_log_callback = False
if name == "AppDaemon._stream":
has_log_callback = True
else:
async with self.AD.callbacks.callbacks_lock:
for callback in self.AD.callbacks.callbacks:
for _uuid in self.AD.callbacks.callbacks[callback]:
cb = self.AD.callbacks.callbacks[callback][_uuid]
if cb["name"] == name and cb["type"] == "event" and cb["event"] == "__AD_LOG_EVENT":
has_log_callback = True
elif cb["name"] == name and cb["type"] == "log":
has_log_callback = True
return has_log_callback
async def process_event_callbacks(self, namespace, data):
"""Processes a pure event callback.
Locate any callbacks that may be registered for this event, check for filters and if appropriate,
dispatch the event for further checking and eventual action.
Args:
namespace (str): Namespace of the event.
data: Data associated with the event.
Returns:
None.
"""
self.logger.debug("process_event_callbacks() %s %s", namespace, data)
removes = []
async with self.AD.callbacks.callbacks_lock:
for name in self.AD.callbacks.callbacks.keys():
for uuid_ in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][uuid_]
if callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global":
#
# Check for either a blank event (for all events)
# Or the event is a match
# But don't allow a global listen for any system events (events that start with __)
#
if "event" in callback and (
(callback["event"] is None and data["event_type"][:2] != "__")
or data["event_type"] == callback["event"]
):
# Check any filters
_run = True
for key in callback["kwargs"]:
if key in data["data"] and callback["kwargs"][key] != data["data"][key]:
_run = False
if data["event_type"] == "__AD_LOG_EVENT":
if (
"log" in callback["kwargs"]
and callback["kwargs"]["log"] != data["data"]["log_type"]
):
_run = False
if _run:
if name in self.AD.app_management.objects:
executed = await self.AD.threading.dispatch_worker(
name,
{
"id": uuid_,
"name": name,
"objectid": self.AD.app_management.objects[name]["id"],
"type": "event",
"event": data["event_type"],
"function": callback["function"],
"data": data["data"],
"pin_app": callback["pin_app"],
"pin_thread": callback["pin_thread"],
"kwargs": callback["kwargs"],
},
)
# Remove the callback if appropriate
if executed is True:
remove = callback["kwargs"].get("oneshot", False)
if remove is True:
removes.append({"name": name, "uuid": uuid_})
for remove in removes:
await self.cancel_event_callback(remove["name"], remove["uuid"])
async def event_services(self, namespace, domain, service, kwargs):
if "event" in kwargs:
event = kwargs["event"]
del kwargs["event"]
await self.fire_event(namespace, event, **kwargs)
else:
self.logger.warning("Malformed 'fire_event' service call, as no event given")
@staticmethod
def sanitize_event_kwargs(app, kwargs):
kwargs_copy = kwargs.copy()
return utils._sanitize_kwargs(kwargs_copy, ["__silent"])
|
has_log_callback
|
Returns ``True`` if the app has a log callback, ``False`` otherwise.
Used to prevent callback loops. In the calling logic, if this function returns
``True`` the resulting logging event will be suppressed.
Args:
name (str): Name of the app.
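A minimal sketch of how the guard is applied, assuming ``events`` is an ``Events`` instance and ``data`` is a ``__AD_LOG_EVENT`` payload; the helper name is hypothetical:
async def handle_log_event(events, namespace, data):
    if await events.has_log_callback(data["data"]["app_name"]):
        # The log line came from an app that itself listens for log events;
        # dispatching it again would produce another log line and loop.
        return
    await events.process_event_callbacks(namespace, data)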
|
"""Module to handle all events within AppDaemon."""
import uuid
from copy import deepcopy
import traceback
import datetime
from appdaemon.appdaemon import AppDaemon
import appdaemon.utils as utils
class Events:
"""Encapsulate event handling."""
def __init__(self, ad: AppDaemon):
"""Constructor.
Args:
ad: Reference to the AppDaemon object
"""
self.AD = ad
self.logger = ad.logging.get_child("_events")
#
# Events
#
async def add_event_callback(self, name, namespace, cb, event, **kwargs):
"""Adds a callback for an event which is called internally by apps.
Args:
name (str): Name of the app.
namespace (str): Namespace of the event.
cb: Callback function.
event (str): Name of the event.
**kwargs: List of values to filter on, and additional arguments to pass to the callback.
Returns:
``None`` or the reference to the callback handle.
"""
if self.AD.threading.validate_pin(name, kwargs) is True:
if "pin" in kwargs:
pin_app = kwargs["pin_app"]
else:
pin_app = self.AD.app_management.objects[name]["pin_app"]
if "pin_thread" in kwargs:
pin_thread = kwargs["pin_thread"]
pin_app = True
else:
pin_thread = self.AD.app_management.objects[name]["pin_thread"]
async with self.AD.callbacks.callbacks_lock:
if name not in self.AD.callbacks.callbacks:
self.AD.callbacks.callbacks[name] = {}
handle = uuid.uuid4().hex
self.AD.callbacks.callbacks[name][handle] = {
"name": name,
"id": self.AD.app_management.objects[name]["id"],
"type": "event",
"function": cb,
"namespace": namespace,
"event": event,
"pin_app": pin_app,
"pin_thread": pin_thread,
"kwargs": kwargs,
}
if "timeout" in kwargs:
exec_time = await self.AD.sched.get_now() + datetime.timedelta(seconds=int(kwargs["timeout"]))
kwargs["__timeout"] = await self.AD.sched.insert_schedule(
name, exec_time, None, False, None, __event_handle=handle,
)
await self.AD.state.add_entity(
"admin",
"event_callback.{}".format(handle),
"active",
{
"app": name,
"event_name": event,
"function": cb.__name__,
"pinned": pin_app,
"pinned_thread": pin_thread,
"fired": 0,
"executed": 0,
"kwargs": kwargs,
},
)
return handle
else:
return None
async def cancel_event_callback(self, name, handle):
"""Cancels an event callback.
Args:
name (str): Name of the app or module.
handle: Previously supplied callback handle for the callback.
Returns:
None.
"""
executed = False
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
del self.AD.callbacks.callbacks[name][handle]
await self.AD.state.remove_entity("admin", "event_callback.{}".format(handle))
executed = True
if name in self.AD.callbacks.callbacks and self.AD.callbacks.callbacks[name] == {}:
del self.AD.callbacks.callbacks[name]
if not executed:
self.logger.warning(
"Invalid callback handle '{}' in cancel_event_callback() from app {}".format(handle, name)
)
return executed
async def info_event_callback(self, name, handle):
"""Gets the information of an event callback.
Args:
name (str): Name of the app or subsystem.
handle: Previously supplied handle for the callback.
Returns:
The event and a copy of the callback's kwargs, or raises a ``ValueError`` if an invalid handle is provided.
"""
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][handle]
return callback["event"], callback["kwargs"].copy()
else:
raise ValueError("Invalid handle: {}".format(handle))
async def fire_event(self, namespace, event, **kwargs):
"""Fires an event.
If the namespace does not have a plugin associated with it, the event will be fired locally.
If a plugin is associated, the firing of the event will be delegated to the plugin, under the
understanding that when the event is fired, the plugin will notify appdaemon that it occurred,
usually via the system the plugin is communicating with.
Args:
namespace (str): Namespace for the event to be fired in.
event (str): Name of the event.
**kwargs: Arguments to associate with the event.
Returns:
None.
"""
self.logger.debug("fire_plugin_event() %s %s %s", namespace, event, kwargs)
plugin = await self.AD.plugins.get_plugin_object(namespace)
if hasattr(plugin, "fire_plugin_event"):
# We assume that the event will come back to us via the plugin
await plugin.fire_plugin_event(event, namespace, **kwargs)
else:
# Just fire the event locally
await self.AD.events.process_event(namespace, {"event_type": event, "data": kwargs})
async def process_event(self, namespace, data):
"""Processes an event that has been received either locally or from a plugin.
Args:
namespace (str): Namespace the event was fired in.
data: Data associated with the event.
Returns:
None.
"""
try:
# if data["event_type"] == "__AD_ENTITY_REMOVED":
# print("process event")
self.logger.debug("Event type:%s:", data["event_type"])
self.logger.debug(data["data"])
# Kick the scheduler so it updates its clock
if self.AD.sched is not None and self.AD.sched.realtime is False and namespace != "admin":
await self.AD.sched.kick()
if data["event_type"] == "state_changed":
if "entity_id" in data["data"] and "new_state" in data["data"]:
if data["data"]["new_state"] is None:
# most likely it is a deleted entity
return
entity_id = data["data"]["entity_id"]
self.AD.state.set_state_simple(namespace, entity_id, data["data"]["new_state"])
if self.AD.apps is True and namespace != "admin":
await self.AD.state.process_state_callbacks(namespace, data)
else:
self.logger.warning("Malformed 'state_changed' event: %s", data["data"])
return
# Check for log callbacks and exit to prevent loops
if data["event_type"] == "__AD_LOG_EVENT":
if await self.has_log_callback(data["data"]["app_name"]):
self.logger.debug("Discarding event for loop avoidance")
return
await self.AD.logging.process_log_callbacks(namespace, data)
if self.AD.apps is True: # and namespace != "admin":
# Process callbacks
await self.process_event_callbacks(namespace, data)
#
# Send to the stream
#
if self.AD.http is not None:
if data["event_type"] == "state_changed":
if data["data"]["new_state"] == data["data"]["old_state"]:
# Nothing changed so don't send
return
# take a copy without "ts" if present, as it breaks deepcopy and JSON serialization
if "ts" in data["data"]:
ts = data["data"].pop("ts")
mydata = deepcopy(data)
data["data"]["ts"] = ts
else:
mydata = deepcopy(data)
await self.AD.http.stream_update(namespace, mydata)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error during process_event()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
# MASKED: has_log_callback function (lines 255-279)
async def process_event_callbacks(self, namespace, data):
"""Processes a pure event callback.
Locate any callbacks that may be registered for this event, check for filters and if appropriate,
dispatch the event for further checking and eventual action.
Args:
namespace (str): Namespace of the event.
data: Data associated with the event.
Returns:
None.
"""
self.logger.debug("process_event_callbacks() %s %s", namespace, data)
removes = []
async with self.AD.callbacks.callbacks_lock:
for name in self.AD.callbacks.callbacks.keys():
for uuid_ in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][uuid_]
if callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global":
#
# Check for either a blank event (for all events)
# Or the event is a match
# But don't allow a global listen for any system events (events that start with __)
#
if "event" in callback and (
(callback["event"] is None and data["event_type"][:2] != "__")
or data["event_type"] == callback["event"]
):
# Check any filters
_run = True
for key in callback["kwargs"]:
if key in data["data"] and callback["kwargs"][key] != data["data"][key]:
_run = False
if data["event_type"] == "__AD_LOG_EVENT":
if (
"log" in callback["kwargs"]
and callback["kwargs"]["log"] != data["data"]["log_type"]
):
_run = False
if _run:
if name in self.AD.app_management.objects:
executed = await self.AD.threading.dispatch_worker(
name,
{
"id": uuid_,
"name": name,
"objectid": self.AD.app_management.objects[name]["id"],
"type": "event",
"event": data["event_type"],
"function": callback["function"],
"data": data["data"],
"pin_app": callback["pin_app"],
"pin_thread": callback["pin_thread"],
"kwargs": callback["kwargs"],
},
)
# Remove the callback if appropriate
if executed is True:
remove = callback["kwargs"].get("oneshot", False)
if remove is True:
removes.append({"name": name, "uuid": uuid_})
for remove in removes:
await self.cancel_event_callback(remove["name"], remove["uuid"])
async def event_services(self, namespace, domain, service, kwargs):
if "event" in kwargs:
event = kwargs["event"]
del kwargs["event"]
await self.fire_event(namespace, event, **kwargs)
else:
self.logger.warning("Malformed 'fire_event' service call, as no event given")
@staticmethod
def sanitize_event_kwargs(app, kwargs):
kwargs_copy = kwargs.copy()
return utils._sanitize_kwargs(kwargs_copy, ["__silent"])
|
async def has_log_callback(self, name):
"""Returns ``True`` if the app has a log callback, ``False`` otherwise.
Used to prevent callback loops. In the calling logic, if this function returns
``True`` the resulting logging event will be suppressed.
Args:
name (str): Name of the app.
"""
has_log_callback = False
if name == "AppDaemon._stream":
has_log_callback = True
else:
async with self.AD.callbacks.callbacks_lock:
for callback in self.AD.callbacks.callbacks:
for _uuid in self.AD.callbacks.callbacks[callback]:
cb = self.AD.callbacks.callbacks[callback][_uuid]
if cb["name"] == name and cb["type"] == "event" and cb["event"] == "__AD_LOG_EVENT":
has_log_callback = True
elif cb["name"] == name and cb["type"] == "log":
has_log_callback = True
return has_log_callback
| 255 | 279 |
"""Module to handle all events within AppDaemon."""
import uuid
from copy import deepcopy
import traceback
import datetime
from appdaemon.appdaemon import AppDaemon
import appdaemon.utils as utils
class Events:
"""Encapsulate event handling."""
def __init__(self, ad: AppDaemon):
"""Constructor.
Args:
ad: Reference to the AppDaemon object
"""
self.AD = ad
self.logger = ad.logging.get_child("_events")
#
# Events
#
async def add_event_callback(self, name, namespace, cb, event, **kwargs):
"""Adds a callback for an event which is called internally by apps.
Args:
name (str): Name of the app.
namespace (str): Namespace of the event.
cb: Callback function.
event (str): Name of the event.
**kwargs: List of values to filter on, and additional arguments to pass to the callback.
Returns:
``None`` or the reference to the callback handle.
"""
if self.AD.threading.validate_pin(name, kwargs) is True:
if "pin" in kwargs:
pin_app = kwargs["pin_app"]
else:
pin_app = self.AD.app_management.objects[name]["pin_app"]
if "pin_thread" in kwargs:
pin_thread = kwargs["pin_thread"]
pin_app = True
else:
pin_thread = self.AD.app_management.objects[name]["pin_thread"]
async with self.AD.callbacks.callbacks_lock:
if name not in self.AD.callbacks.callbacks:
self.AD.callbacks.callbacks[name] = {}
handle = uuid.uuid4().hex
self.AD.callbacks.callbacks[name][handle] = {
"name": name,
"id": self.AD.app_management.objects[name]["id"],
"type": "event",
"function": cb,
"namespace": namespace,
"event": event,
"pin_app": pin_app,
"pin_thread": pin_thread,
"kwargs": kwargs,
}
if "timeout" in kwargs:
exec_time = await self.AD.sched.get_now() + datetime.timedelta(seconds=int(kwargs["timeout"]))
kwargs["__timeout"] = await self.AD.sched.insert_schedule(
name, exec_time, None, False, None, __event_handle=handle,
)
await self.AD.state.add_entity(
"admin",
"event_callback.{}".format(handle),
"active",
{
"app": name,
"event_name": event,
"function": cb.__name__,
"pinned": pin_app,
"pinned_thread": pin_thread,
"fired": 0,
"executed": 0,
"kwargs": kwargs,
},
)
return handle
else:
return None
async def cancel_event_callback(self, name, handle):
"""Cancels an event callback.
Args:
name (str): Name of the app or module.
handle: Previously supplied callback handle for the callback.
Returns:
None.
"""
executed = False
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
del self.AD.callbacks.callbacks[name][handle]
await self.AD.state.remove_entity("admin", "event_callback.{}".format(handle))
executed = True
if name in self.AD.callbacks.callbacks and self.AD.callbacks.callbacks[name] == {}:
del self.AD.callbacks.callbacks[name]
if not executed:
self.logger.warning(
"Invalid callback handle '{}' in cancel_event_callback() from app {}".format(handle, name)
)
return executed
async def info_event_callback(self, name, handle):
"""Gets the information of an event callback.
Args:
name (str): Name of the app or subsystem.
handle: Previously supplied handle for the callback.
Returns:
The event and a copy of the callback's kwargs, or raises a ``ValueError`` if an invalid handle is provided.
"""
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][handle]
return callback["event"], callback["kwargs"].copy()
else:
raise ValueError("Invalid handle: {}".format(handle))
async def fire_event(self, namespace, event, **kwargs):
"""Fires an event.
If the namespace does not have a plugin associated with it, the event will be fired locally.
If a plugin is associated, the firing of the event will be delegated to the plugin, under the
understanding that when the event is fired, the plugin will notify appdaemon that it occurred,
usually via the system the plugin is communicating with.
Args:
namespace (str): Namespace for the event to be fired in.
event (str): Name of the event.
**kwargs: Arguments to associate with the event.
Returns:
None.
"""
self.logger.debug("fire_plugin_event() %s %s %s", namespace, event, kwargs)
plugin = await self.AD.plugins.get_plugin_object(namespace)
if hasattr(plugin, "fire_plugin_event"):
# We assume that the event will come back to us via the plugin
await plugin.fire_plugin_event(event, namespace, **kwargs)
else:
# Just fire the event locally
await self.AD.events.process_event(namespace, {"event_type": event, "data": kwargs})
async def process_event(self, namespace, data):
"""Processes an event that has been received either locally or from a plugin.
Args:
namespace (str): Namespace the event was fired in.
data: Data associated with the event.
Returns:
None.
"""
try:
# if data["event_type"] == "__AD_ENTITY_REMOVED":
# print("process event")
self.logger.debug("Event type:%s:", data["event_type"])
self.logger.debug(data["data"])
# Kick the scheduler so it updates its clock
if self.AD.sched is not None and self.AD.sched.realtime is False and namespace != "admin":
await self.AD.sched.kick()
if data["event_type"] == "state_changed":
if "entity_id" in data["data"] and "new_state" in data["data"]:
if data["data"]["new_state"] is None:
# most likely it is a deleted entity
return
entity_id = data["data"]["entity_id"]
self.AD.state.set_state_simple(namespace, entity_id, data["data"]["new_state"])
if self.AD.apps is True and namespace != "admin":
await self.AD.state.process_state_callbacks(namespace, data)
else:
self.logger.warning("Malformed 'state_changed' event: %s", data["data"])
return
# Check for log callbacks and exit to prevent loops
if data["event_type"] == "__AD_LOG_EVENT":
if await self.has_log_callback(data["data"]["app_name"]):
self.logger.debug("Discarding event for loop avoidance")
return
await self.AD.logging.process_log_callbacks(namespace, data)
if self.AD.apps is True: # and namespace != "admin":
# Process callbacks
await self.process_event_callbacks(namespace, data)
#
# Send to the stream
#
if self.AD.http is not None:
if data["event_type"] == "state_changed":
if data["data"]["new_state"] == data["data"]["old_state"]:
# Nothing changed so don't send
return
# take a copy without "ts" if present, as it breaks deepcopy and JSON serialization
if "ts" in data["data"]:
ts = data["data"].pop("ts")
mydata = deepcopy(data)
data["data"]["ts"] = ts
else:
mydata = deepcopy(data)
await self.AD.http.stream_update(namespace, mydata)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error during process_event()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
async def has_log_callback(self, name):
"""Returns ``True`` if the app has a log callback, ``False`` otherwise.
Used to prevent callback loops. In the calling logic, if this function returns
``True`` the resulting logging event will be suppressed.
Args:
name (str): Name of the app.
"""
has_log_callback = False
if name == "AppDaemon._stream":
has_log_callback = True
else:
async with self.AD.callbacks.callbacks_lock:
for callback in self.AD.callbacks.callbacks:
for _uuid in self.AD.callbacks.callbacks[callback]:
cb = self.AD.callbacks.callbacks[callback][_uuid]
if cb["name"] == name and cb["type"] == "event" and cb["event"] == "__AD_LOG_EVENT":
has_log_callback = True
elif cb["name"] == name and cb["type"] == "log":
has_log_callback = True
return has_log_callback
async def process_event_callbacks(self, namespace, data):
"""Processes a pure event callback.
Locate any callbacks that may be registered for this event, check for filters and if appropriate,
dispatch the event for further checking and eventual action.
Args:
namespace (str): Namespace of the event.
data: Data associated with the event.
Returns:
None.
"""
self.logger.debug("process_event_callbacks() %s %s", namespace, data)
removes = []
async with self.AD.callbacks.callbacks_lock:
for name in self.AD.callbacks.callbacks.keys():
for uuid_ in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][uuid_]
if callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global":
#
# Check for either a blank event (for all events)
# Or the event is a match
# But don't allow a global listen for any system events (events that start with __)
#
if "event" in callback and (
(callback["event"] is None and data["event_type"][:2] != "__")
or data["event_type"] == callback["event"]
):
# Check any filters
_run = True
for key in callback["kwargs"]:
if key in data["data"] and callback["kwargs"][key] != data["data"][key]:
_run = False
if data["event_type"] == "__AD_LOG_EVENT":
if (
"log" in callback["kwargs"]
and callback["kwargs"]["log"] != data["data"]["log_type"]
):
_run = False
if _run:
if name in self.AD.app_management.objects:
executed = await self.AD.threading.dispatch_worker(
name,
{
"id": uuid_,
"name": name,
"objectid": self.AD.app_management.objects[name]["id"],
"type": "event",
"event": data["event_type"],
"function": callback["function"],
"data": data["data"],
"pin_app": callback["pin_app"],
"pin_thread": callback["pin_thread"],
"kwargs": callback["kwargs"],
},
)
# Remove the callback if appropriate
if executed is True:
remove = callback["kwargs"].get("oneshot", False)
if remove is True:
removes.append({"name": name, "uuid": uuid_})
for remove in removes:
await self.cancel_event_callback(remove["name"], remove["uuid"])
async def event_services(self, namespace, domain, service, kwargs):
if "event" in kwargs:
event = kwargs["event"]
del kwargs["event"]
await self.fire_event(namespace, event, **kwargs)
else:
self.logger.warning("Malformed 'fire_event' service call, as no event given")
@staticmethod
def sanitize_event_kwargs(app, kwargs):
kwargs_copy = kwargs.copy()
return utils._sanitize_kwargs(kwargs_copy, ["__silent"])
|
process_event_callbacks
|
Processes a pure event callback.
Locate any callbacks that may be registered for this event, check for filters and if appropriate,
dispatch the event for further checking and eventual action.
Args:
namespace (str): Namespace of the event.
data: Data associated with the event.
Returns:
None.
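A minimal standalone sketch of the kwargs filter described above; the function and entity names are illustrative:
def kwargs_filter_passes(callback_kwargs, event_data):
    # A registered kwarg only blocks dispatch when the same key appears in the
    # event data with a different value; keys absent from the event data do not
    # block anything.
    return all(
        event_data[key] == value
        for key, value in callback_kwargs.items()
        if key in event_data
    )

assert kwargs_filter_passes({"entity": "light.porch"}, {"entity": "light.porch"})
assert not kwargs_filter_passes({"entity": "light.porch"}, {"entity": "light.kitchen"})
assert kwargs_filter_passes({"oneshot": True}, {"entity": "light.porch"})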
|
"""Module to handle all events within AppDaemon."""
import uuid
from copy import deepcopy
import traceback
import datetime
from appdaemon.appdaemon import AppDaemon
import appdaemon.utils as utils
class Events:
"""Encapsulate event handling."""
def __init__(self, ad: AppDaemon):
"""Constructor.
Args:
ad: Reference to the AppDaemon object
"""
self.AD = ad
self.logger = ad.logging.get_child("_events")
#
# Events
#
async def add_event_callback(self, name, namespace, cb, event, **kwargs):
"""Adds a callback for an event which is called internally by apps.
Args:
name (str): Name of the app.
namespace (str): Namespace of the event.
cb: Callback function.
event (str): Name of the event.
**kwargs: List of values to filter on, and additional arguments to pass to the callback.
Returns:
``None`` or the reference to the callback handle.
"""
if self.AD.threading.validate_pin(name, kwargs) is True:
if "pin" in kwargs:
pin_app = kwargs["pin_app"]
else:
pin_app = self.AD.app_management.objects[name]["pin_app"]
if "pin_thread" in kwargs:
pin_thread = kwargs["pin_thread"]
pin_app = True
else:
pin_thread = self.AD.app_management.objects[name]["pin_thread"]
async with self.AD.callbacks.callbacks_lock:
if name not in self.AD.callbacks.callbacks:
self.AD.callbacks.callbacks[name] = {}
handle = uuid.uuid4().hex
self.AD.callbacks.callbacks[name][handle] = {
"name": name,
"id": self.AD.app_management.objects[name]["id"],
"type": "event",
"function": cb,
"namespace": namespace,
"event": event,
"pin_app": pin_app,
"pin_thread": pin_thread,
"kwargs": kwargs,
}
if "timeout" in kwargs:
exec_time = await self.AD.sched.get_now() + datetime.timedelta(seconds=int(kwargs["timeout"]))
kwargs["__timeout"] = await self.AD.sched.insert_schedule(
name, exec_time, None, False, None, __event_handle=handle,
)
await self.AD.state.add_entity(
"admin",
"event_callback.{}".format(handle),
"active",
{
"app": name,
"event_name": event,
"function": cb.__name__,
"pinned": pin_app,
"pinned_thread": pin_thread,
"fired": 0,
"executed": 0,
"kwargs": kwargs,
},
)
return handle
else:
return None
async def cancel_event_callback(self, name, handle):
"""Cancels an event callback.
Args:
name (str): Name of the app or module.
handle: Previously supplied callback handle for the callback.
Returns:
None.
"""
executed = False
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
del self.AD.callbacks.callbacks[name][handle]
await self.AD.state.remove_entity("admin", "event_callback.{}".format(handle))
executed = True
if name in self.AD.callbacks.callbacks and self.AD.callbacks.callbacks[name] == {}:
del self.AD.callbacks.callbacks[name]
if not executed:
self.logger.warning(
"Invalid callback handle '{}' in cancel_event_callback() from app {}".format(handle, name)
)
return executed
async def info_event_callback(self, name, handle):
"""Gets the information of an event callback.
Args:
name (str): Name of the app or subsystem.
handle: Previously supplied handle for the callback.
Returns:
The event and a copy of the callback's kwargs, or raises a ``ValueError`` if an invalid handle is provided.
"""
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][handle]
return callback["event"], callback["kwargs"].copy()
else:
raise ValueError("Invalid handle: {}".format(handle))
async def fire_event(self, namespace, event, **kwargs):
"""Fires an event.
If the namespace does not have a plugin associated with it, the event will be fired locally.
If a plugin is associated, the firing of the event will be delegated to the plugin, under the
understanding that when the event is fired, the plugin will notify appdaemon that it occurred,
usually via the system the plugin is communicating with.
Args:
namespace (str): Namespace for the event to be fired in.
event (str): Name of the event.
**kwargs: Arguments to associate with the event.
Returns:
None.
"""
self.logger.debug("fire_plugin_event() %s %s %s", namespace, event, kwargs)
plugin = await self.AD.plugins.get_plugin_object(namespace)
if hasattr(plugin, "fire_plugin_event"):
# We assume that the event will come back to us via the plugin
await plugin.fire_plugin_event(event, namespace, **kwargs)
else:
# Just fire the event locally
await self.AD.events.process_event(namespace, {"event_type": event, "data": kwargs})
async def process_event(self, namespace, data):
"""Processes an event that has been received either locally or from a plugin.
Args:
namespace (str): Namespace the event was fired in.
data: Data associated with the event.
Returns:
None.
"""
try:
# if data["event_type"] == "__AD_ENTITY_REMOVED":
# print("process event")
self.logger.debug("Event type:%s:", data["event_type"])
self.logger.debug(data["data"])
# Kick the scheduler so it updates its clock
if self.AD.sched is not None and self.AD.sched.realtime is False and namespace != "admin":
await self.AD.sched.kick()
if data["event_type"] == "state_changed":
if "entity_id" in data["data"] and "new_state" in data["data"]:
if data["data"]["new_state"] is None:
# most likely it is a deleted entity
return
entity_id = data["data"]["entity_id"]
self.AD.state.set_state_simple(namespace, entity_id, data["data"]["new_state"])
if self.AD.apps is True and namespace != "admin":
await self.AD.state.process_state_callbacks(namespace, data)
else:
self.logger.warning("Malformed 'state_changed' event: %s", data["data"])
return
# Check for log callbacks and exit to prevent loops
if data["event_type"] == "__AD_LOG_EVENT":
if await self.has_log_callback(data["data"]["app_name"]):
self.logger.debug("Discarding event for loop avoidance")
return
await self.AD.logging.process_log_callbacks(namespace, data)
if self.AD.apps is True: # and namespace != "admin":
# Process callbacks
await self.process_event_callbacks(namespace, data)
#
# Send to the stream
#
if self.AD.http is not None:
if data["event_type"] == "state_changed":
if data["data"]["new_state"] == data["data"]["old_state"]:
# Nothing changed so don't send
return
# take a copy without "ts" if present, as it breaks deepcopy and JSON serialization
if "ts" in data["data"]:
ts = data["data"].pop("ts")
mydata = deepcopy(data)
data["data"]["ts"] = ts
else:
mydata = deepcopy(data)
await self.AD.http.stream_update(namespace, mydata)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error during process_event()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
async def has_log_callback(self, name):
"""Returns ``True`` if the app has a log callback, ``False`` otherwise.
Used to prevent callback loops. In the calling logic, if this function returns
``True`` the resulting logging event will be suppressed.
Args:
name (str): Name of the app.
"""
has_log_callback = False
if name == "AppDaemon._stream":
has_log_callback = True
else:
async with self.AD.callbacks.callbacks_lock:
for callback in self.AD.callbacks.callbacks:
for _uuid in self.AD.callbacks.callbacks[callback]:
cb = self.AD.callbacks.callbacks[callback][_uuid]
if cb["name"] == name and cb["type"] == "event" and cb["event"] == "__AD_LOG_EVENT":
has_log_callback = True
elif cb["name"] == name and cb["type"] == "log":
has_log_callback = True
return has_log_callback
# MASKED: process_event_callbacks function (lines 281-353)
async def event_services(self, namespace, domain, service, kwargs):
if "event" in kwargs:
event = kwargs["event"]
del kwargs["event"]
await self.fire_event(namespace, event, **kwargs)
else:
self.logger.warning("Malformed 'fire_event' service call, as no event given")
@staticmethod
def sanitize_event_kwargs(app, kwargs):
kwargs_copy = kwargs.copy()
return utils._sanitize_kwargs(kwargs_copy, ["__silent"])
|
async def process_event_callbacks(self, namespace, data):
"""Processes a pure event callback.
Locate any callbacks that may be registered for this event, check for filters and if appropriate,
dispatch the event for further checking and eventual action.
Args:
namespace (str): Namespace of the event.
data: Data associated with the event.
Returns:
None.
"""
self.logger.debug("process_event_callbacks() %s %s", namespace, data)
removes = []
async with self.AD.callbacks.callbacks_lock:
for name in self.AD.callbacks.callbacks.keys():
for uuid_ in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][uuid_]
if callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global":
#
# Check for either a blank event (for all events)
# Or the event is a match
# But don't allow a global listen for any system events (events that start with __)
#
if "event" in callback and (
(callback["event"] is None and data["event_type"][:2] != "__")
or data["event_type"] == callback["event"]
):
# Check any filters
_run = True
for key in callback["kwargs"]:
if key in data["data"] and callback["kwargs"][key] != data["data"][key]:
_run = False
if data["event_type"] == "__AD_LOG_EVENT":
if (
"log" in callback["kwargs"]
and callback["kwargs"]["log"] != data["data"]["log_type"]
):
_run = False
if _run:
if name in self.AD.app_management.objects:
executed = await self.AD.threading.dispatch_worker(
name,
{
"id": uuid_,
"name": name,
"objectid": self.AD.app_management.objects[name]["id"],
"type": "event",
"event": data["event_type"],
"function": callback["function"],
"data": data["data"],
"pin_app": callback["pin_app"],
"pin_thread": callback["pin_thread"],
"kwargs": callback["kwargs"],
},
)
# Remove the callback if appropriate
if executed is True:
remove = callback["kwargs"].get("oneshot", False)
if remove is True:
removes.append({"name": name, "uuid": uuid_})
for remove in removes:
await self.cancel_event_callback(remove["name"], remove["uuid"])
| 281 | 353 |
"""Module to handle all events within AppDaemon."""
import uuid
from copy import deepcopy
import traceback
import datetime
from appdaemon.appdaemon import AppDaemon
import appdaemon.utils as utils
class Events:
"""Encapsulate event handling."""
def __init__(self, ad: AppDaemon):
"""Constructor.
Args:
ad: Reference to the AppDaemon object
"""
self.AD = ad
self.logger = ad.logging.get_child("_events")
#
# Events
#
async def add_event_callback(self, name, namespace, cb, event, **kwargs):
"""Adds a callback for an event which is called internally by apps.
Args:
name (str): Name of the app.
namespace (str): Namespace of the event.
cb: Callback function.
event (str): Name of the event.
**kwargs: List of values to filter on, and additional arguments to pass to the callback.
Returns:
``None`` or the reference to the callback handle.
"""
if self.AD.threading.validate_pin(name, kwargs) is True:
if "pin" in kwargs:
pin_app = kwargs["pin_app"]
else:
pin_app = self.AD.app_management.objects[name]["pin_app"]
if "pin_thread" in kwargs:
pin_thread = kwargs["pin_thread"]
pin_app = True
else:
pin_thread = self.AD.app_management.objects[name]["pin_thread"]
async with self.AD.callbacks.callbacks_lock:
if name not in self.AD.callbacks.callbacks:
self.AD.callbacks.callbacks[name] = {}
handle = uuid.uuid4().hex
self.AD.callbacks.callbacks[name][handle] = {
"name": name,
"id": self.AD.app_management.objects[name]["id"],
"type": "event",
"function": cb,
"namespace": namespace,
"event": event,
"pin_app": pin_app,
"pin_thread": pin_thread,
"kwargs": kwargs,
}
if "timeout" in kwargs:
exec_time = await self.AD.sched.get_now() + datetime.timedelta(seconds=int(kwargs["timeout"]))
kwargs["__timeout"] = await self.AD.sched.insert_schedule(
name, exec_time, None, False, None, __event_handle=handle,
)
await self.AD.state.add_entity(
"admin",
"event_callback.{}".format(handle),
"active",
{
"app": name,
"event_name": event,
"function": cb.__name__,
"pinned": pin_app,
"pinned_thread": pin_thread,
"fired": 0,
"executed": 0,
"kwargs": kwargs,
},
)
return handle
else:
return None
async def cancel_event_callback(self, name, handle):
"""Cancels an event callback.
Args:
name (str): Name of the app or module.
handle: Previously supplied callback handle for the callback.
Returns:
None.
"""
executed = False
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
del self.AD.callbacks.callbacks[name][handle]
await self.AD.state.remove_entity("admin", "event_callback.{}".format(handle))
executed = True
if name in self.AD.callbacks.callbacks and self.AD.callbacks.callbacks[name] == {}:
del self.AD.callbacks.callbacks[name]
if not executed:
self.logger.warning(
"Invalid callback handle '{}' in cancel_event_callback() from app {}".format(handle, name)
)
return executed
async def info_event_callback(self, name, handle):
"""Gets the information of an event callback.
Args:
name (str): Name of the app or subsystem.
handle: Previously supplied handle for the callback.
Returns:
The event and a copy of the callback's kwargs, or raises a ``ValueError`` if an invalid handle is provided.
"""
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][handle]
return callback["event"], callback["kwargs"].copy()
else:
raise ValueError("Invalid handle: {}".format(handle))
async def fire_event(self, namespace, event, **kwargs):
"""Fires an event.
If the namespace does not have a plugin associated with it, the event will be fired locally.
If a plugin is associated, the firing of the event will be delegated to the plugin, under the
understanding that when the event is fired, the plugin will notify appdaemon that it occurred,
usually via the system the plugin is communicating with.
Args:
namespace (str): Namespace for the event to be fired in.
event (str): Name of the event.
**kwargs: Arguments to associate with the event.
Returns:
None.
"""
self.logger.debug("fire_plugin_event() %s %s %s", namespace, event, kwargs)
plugin = await self.AD.plugins.get_plugin_object(namespace)
if hasattr(plugin, "fire_plugin_event"):
# We assume that the event will come back to us via the plugin
await plugin.fire_plugin_event(event, namespace, **kwargs)
else:
# Just fire the event locally
await self.AD.events.process_event(namespace, {"event_type": event, "data": kwargs})
async def process_event(self, namespace, data):
"""Processes an event that has been received either locally or from a plugin.
Args:
namespace (str): Namespace the event was fired in.
data: Data associated with the event.
Returns:
None.
"""
try:
# if data["event_type"] == "__AD_ENTITY_REMOVED":
# print("process event")
self.logger.debug("Event type:%s:", data["event_type"])
self.logger.debug(data["data"])
# Kick the scheduler so it updates its clock
if self.AD.sched is not None and self.AD.sched.realtime is False and namespace != "admin":
await self.AD.sched.kick()
if data["event_type"] == "state_changed":
if "entity_id" in data["data"] and "new_state" in data["data"]:
if data["data"]["new_state"] is None:
# most likely it is a deleted entity
return
entity_id = data["data"]["entity_id"]
self.AD.state.set_state_simple(namespace, entity_id, data["data"]["new_state"])
if self.AD.apps is True and namespace != "admin":
await self.AD.state.process_state_callbacks(namespace, data)
else:
self.logger.warning("Malformed 'state_changed' event: %s", data["data"])
return
# Check for log callbacks and exit to prevent loops
if data["event_type"] == "__AD_LOG_EVENT":
if await self.has_log_callback(data["data"]["app_name"]):
self.logger.debug("Discarding event for loop avoidance")
return
await self.AD.logging.process_log_callbacks(namespace, data)
if self.AD.apps is True: # and namespace != "admin":
# Process callbacks
await self.process_event_callbacks(namespace, data)
#
# Send to the stream
#
if self.AD.http is not None:
if data["event_type"] == "state_changed":
if data["data"]["new_state"] == data["data"]["old_state"]:
# Nothing changed so don't send
return
# take a copy without "ts" if present, as it breaks deepcopy and JSON serialization
if "ts" in data["data"]:
ts = data["data"].pop("ts")
mydata = deepcopy(data)
data["data"]["ts"] = ts
else:
mydata = deepcopy(data)
await self.AD.http.stream_update(namespace, mydata)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error during process_event()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
async def has_log_callback(self, name):
"""Returns ``True`` if the app has a log callback, ``False`` otherwise.
Used to prevent callback loops. In the calling logic, if this function returns
``True`` the resulting logging event will be suppressed.
Args:
name (str): Name of the app.
"""
has_log_callback = False
if name == "AppDaemon._stream":
has_log_callback = True
else:
async with self.AD.callbacks.callbacks_lock:
for callback in self.AD.callbacks.callbacks:
for _uuid in self.AD.callbacks.callbacks[callback]:
cb = self.AD.callbacks.callbacks[callback][_uuid]
if cb["name"] == name and cb["type"] == "event" and cb["event"] == "__AD_LOG_EVENT":
has_log_callback = True
elif cb["name"] == name and cb["type"] == "log":
has_log_callback = True
return has_log_callback
async def process_event_callbacks(self, namespace, data):
"""Processes a pure event callback.
Locate any callbacks that may be registered for this event, check for filters and if appropriate,
dispatch the event for further checking and eventual action.
Args:
namespace (str): Namespace of the event.
data: Data associated with the event.
Returns:
None.
"""
self.logger.debug("process_event_callbacks() %s %s", namespace, data)
removes = []
async with self.AD.callbacks.callbacks_lock:
for name in self.AD.callbacks.callbacks.keys():
for uuid_ in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][uuid_]
if callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global":
#
# Check for either a blank event (for all events)
# Or the event is a match
# But don't allow a global listen for any system events (events that start with __)
#
if "event" in callback and (
(callback["event"] is None and data["event_type"][:2] != "__")
or data["event_type"] == callback["event"]
):
# Check any filters
_run = True
for key in callback["kwargs"]:
if key in data["data"] and callback["kwargs"][key] != data["data"][key]:
_run = False
if data["event_type"] == "__AD_LOG_EVENT":
if (
"log" in callback["kwargs"]
and callback["kwargs"]["log"] != data["data"]["log_type"]
):
_run = False
if _run:
if name in self.AD.app_management.objects:
executed = await self.AD.threading.dispatch_worker(
name,
{
"id": uuid_,
"name": name,
"objectid": self.AD.app_management.objects[name]["id"],
"type": "event",
"event": data["event_type"],
"function": callback["function"],
"data": data["data"],
"pin_app": callback["pin_app"],
"pin_thread": callback["pin_thread"],
"kwargs": callback["kwargs"],
},
)
# Remove the callback if appropriate
if executed is True:
remove = callback["kwargs"].get("oneshot", False)
if remove is True:
removes.append({"name": name, "uuid": uuid_})
for remove in removes:
await self.cancel_event_callback(remove["name"], remove["uuid"])
async def event_services(self, namespace, domain, service, kwargs):
if "event" in kwargs:
event = kwargs["event"]
del kwargs["event"]
await self.fire_event(namespace, event, **kwargs)
else:
self.logger.warning("Malformed 'fire_event' service call, as no event given")
@staticmethod
def sanitize_event_kwargs(app, kwargs):
kwargs_copy = kwargs.copy()
return utils._sanitize_kwargs(kwargs_copy, ["__silent"])
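# --- Editor's sketch (not part of AppDaemon) -------------------------------
# process_event_callbacks() above treats every extra kwarg registered with a
# callback as an equality filter against the event data, and only consults the
# "oneshot" kwarg after a successful dispatch to decide whether to cancel the
# callback. The small helper below mirrors just that matching rule so the
# semantics can be checked in isolation; all names here are illustrative.
def _event_filters_match(callback_kwargs, event_data):
    # A registered kwarg blocks dispatch only when the same key exists in the
    # event data with a different value. Control kwargs such as "oneshot"
    # normally never appear in event data, so they fall through the
    # "key in data" guard untouched, exactly as in the loop above.
    for key, expected in callback_kwargs.items():
        if key in event_data and event_data[key] != expected:
            return False
    return True

assert _event_filters_match({"service": "turn_on", "oneshot": True},
                            {"service": "turn_on", "entity_id": "light.x"})
assert not _event_filters_match({"service": "turn_on"}, {"service": "turn_off"})
assert _event_filters_match({"service": "turn_on"}, {"entity_id": "light.x"})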
|
send_msg
|
Find the socket descriptor mapped by workerId and send them a message.
Args:
workerId: UUID string used to identify and retrieve a worker.
message: Message to be sent.
|
import queue
from ..workers import Worker
from ..codes import WORKER_PROPERTIES
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class SocketHandler(metaclass=Singleton):
""" Socket Handler is a sigleton class used to handle/manage websocket connections. """
def __init__(self):
self.connections = {}
def new_connection(self, workerId: str, socket):
""" Create a mapping structure to establish a bond between a workerId and a socket descriptor.
Args:
workerId: UUID string used to identify workers.
socket: Socket descriptor that will be used to send/receive messages from this client.
Returns:
Worker: a worker instance with the corresponding workerId
"""
if workerId not in self.connections:
self.connections[workerId] = Worker(workerId, socket)
else:
worker = self.connections[workerId]
if worker.status == WORKER_PROPERTIES.OFFLINE:
worker._socket = socket
return self.connections[workerId]
# MASKED: send_msg function (lines 40-49)
def get(self, query):
"""Retrieve a worker by its UUID string or its socket descriptor."""
if isinstance(query, str):
return self.connections.get(query, None)
else:
return self.__retrieve_worker_by_socket(query)
def remove(self, socket) -> str:
""" Remove a socket descriptor from mapping structure. It will be used when the socket connection is closed.
Args:
socket: socket descriptor used to send/receive messages.
Returns:
workerId: Worker id linked to that connection.
"""
worker = self.__retrieve_worker_by_socket(socket)
if worker:
self.connections[worker._id]._socket = None
self.connections[worker._id].connected_nodes = []
return worker._id
def __retrieve_worker_by_socket(self, socket):
for worker_id, worker in self.connections.items():
if worker._socket == socket:
return self.connections[worker_id]
@property
def nodes(self) -> list:
"""Return all the connected nodes as a list of tuples of (worker_id, worker)"""
return list(self.connections.items())
def __len__(self) -> int:
""" Number of connections handled by this server.
Returns:
length: number of connections handled by this server.
"""
return len(self.connections)
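# --- Editor's sketch (illustration only) -----------------------------------
# The Singleton metaclass above caches one instance per class in _instances,
# so every call to SocketHandler() returns the same connection registry. The
# toy class below restates the pattern in isolation to make that visible;
# "_ToySingleton" and "Counter" are illustrative names, not project code.
class _ToySingleton(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(_ToySingleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class Counter(metaclass=_ToySingleton):
    def __init__(self):
        self.value = 0


a = Counter()
b = Counter()
a.value += 1
assert a is b and b.value == 1  # both names refer to the single cached instance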
|
def send_msg(self, workerId: str, message: str):
""" Find the socket descriptor mapped by workerId and send them a message.
Args:
workerId: UUID string used to identify and retrieve a worker.
message: Message to be sent.
"""
socket = self.connections.get(workerId, None)
if socket:
socket.send(message)
| 40 | 49 |
import queue
from ..workers import Worker
from ..codes import WORKER_PROPERTIES
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class SocketHandler(metaclass=Singleton):
""" Socket Handler is a sigleton class used to handle/manage websocket connections. """
def __init__(self):
self.connections = {}
def new_connection(self, workerId: str, socket):
""" Create a mapping structure to establish a bond between a workerId and a socket descriptor.
Args:
workerId: UUID string used to identify workers.
socket: Socket descriptor that will be used to send/receive messages from this client.
Returns:
Worker: a worker instance with the corresponding workerId
"""
if workerId not in self.connections:
self.connections[workerId] = Worker(workerId, socket)
else:
worker = self.connections[workerId]
if worker.status == WORKER_PROPERTIES.OFFLINE:
worker._socket = socket
return self.connections[workerId]
def send_msg(self, workerId: str, message: str):
""" Find the socket descriptor mapped by workerId and send them a message.
Args:
workerId: UUID string used to identify and retrieve a worker.
message: Message to be sent.
"""
socket = self.connections.get(workerId, None)
if socket:
socket.send(message)
def get(self, query):
"""Retrieve a worker by its UUID string or its socket descriptor."""
if isinstance(query, str):
return self.connections.get(query, None)
else:
return self.__retrieve_worker_by_socket(query)
def remove(self, socket) -> str:
""" Remove a socket descriptor from mapping structure. It will be used when the socket connection is closed.
Args:
socket: socket descriptor used to send/receive messages.
Returns:
workerId: Worker id linked to that connection.
"""
worker = self.__retrieve_worker_by_socket(socket)
if worker:
self.connections[worker._id]._socket = None
self.connections[worker._id].connected_nodes = []
return worker._id
def __retrieve_worker_by_socket(self, socket):
for worker_id, worker in self.connections.items():
if worker._socket == socket:
return self.connections[worker_id]
@property
def nodes(self) -> list:
"""Return all the connected nodes as a list of tuples of (worker_id, worker)"""
return list(self.connections.items())
def __len__(self) -> int:
""" Number of connections handled by this server.
Returns:
length: number of connections handled by this server.
"""
return len(self.connections)
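# --- Editor's sketch (illustration only) -----------------------------------
# send_msg() above is just a dict lookup followed by .send(message) on the
# stored entry, so whatever new_connection() registers under a workerId is
# expected to expose a send() method (in the project that is the Worker
# class). The stub below mirrors that contract with a plain dict; FakeWorker
# stands in for Worker and is not part of the real code.
class FakeWorker:
    def __init__(self):
        self.sent = []

    def send(self, message):
        self.sent.append(message)


connections = {"worker-1": FakeWorker()}      # stand-in for handler.connections
target = connections.get("worker-1", None)    # same lookup send_msg performs
if target:
    target.send('{"type": "ping"}')
assert connections["worker-1"].sent == ['{"type": "ping"}']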
|
forward
|
Decode the bbox and do NMS if needed.
Args:
head_out (tuple): bbox_pred and cls_prob of bbox_head output.
rois (tuple): roi and rois_num of rpn_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
labels, scores and bboxes. The sizes of the bboxes correspond
to the input image, and the bboxes may be used in other branches.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ppdet.modeling.bbox_utils import nonempty_bbox, rbox2poly
from ppdet.modeling.layers import TTFBox
from .transformers import bbox_cxcywh_to_xyxy
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
__all__ = [
'BBoxPostProcess', 'MaskPostProcess', 'FCOSPostProcess',
'S2ANetBBoxPostProcess', 'JDEBBoxPostProcess', 'CenterNetPostProcess',
'DETRBBoxPostProcess', 'SparsePostProcess'
]
@register
class BBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=80, decode=None, nms=None):
super(BBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.fake_bboxes = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
# MASKED: forward function (lines 50-72)
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Notes:
Currently only support bs = 1.
Args:
bboxes (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
pred_result (Tensor): The final prediction results with shape [N, 6]
including labels, scores and bboxes.
"""
if bboxes.shape[0] == 0:
bboxes = self.fake_bboxes
bbox_num = self.fake_bbox_num
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([scale_x, scale_y, scale_x, scale_y])
expand_scale = paddle.expand(scale, [bbox_num[i], 4])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
self.origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 6], label, score, bbox
pred_label = bboxes[:, 0:1]
pred_score = bboxes[:, 1:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
scaled_bbox = pred_bbox / scale_factor_list
origin_h = self.origin_shape_list[:, 0]
origin_w = self.origin_shape_list[:, 1]
zeros = paddle.zeros_like(origin_h)
# clip bbox to [0, original_size]
x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros)
y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros)
x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros)
y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1)
# filter empty bbox
keep_mask = nonempty_bbox(pred_bbox, return_mask=True)
keep_mask = paddle.unsqueeze(keep_mask, [1])
pred_label = paddle.where(keep_mask, pred_label,
paddle.ones_like(pred_label) * -1)
pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1)
return pred_result
def get_origin_shape(self, ):
return self.origin_shape_list
@register
class MaskPostProcess(object):
"""
refer to:
https://github.com/facebookresearch/detectron2/layers/mask_ops.py
Get Mask output according to the output from model
"""
def __init__(self, binary_thresh=0.5):
super(MaskPostProcess, self).__init__()
self.binary_thresh = binary_thresh
def paste_mask(self, masks, boxes, im_h, im_w):
"""
Paste the mask prediction to the original image.
"""
x0, y0, x1, y1 = paddle.split(boxes, 4, axis=1)
masks = paddle.unsqueeze(masks, [0, 1])
img_y = paddle.arange(0, im_h, dtype='float32') + 0.5
img_x = paddle.arange(0, im_w, dtype='float32') + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
img_x = paddle.unsqueeze(img_x, [1])
img_y = paddle.unsqueeze(img_y, [2])
N = boxes.shape[0]
gx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]])
gy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]])
grid = paddle.stack([gx, gy], axis=3)
img_masks = F.grid_sample(masks, grid, align_corners=False)
return img_masks[:, 0]
def __call__(self, mask_out, bboxes, bbox_num, origin_shape):
"""
Decode the mask_out and paste the mask to the origin image.
Args:
mask_out (Tensor): mask_head output with shape [N, 28, 28].
bbox_pred (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
origin_shape (Tensor): The origin shape of the input image, the tensor
shape is [N, 2], and each row is [h, w].
Returns:
pred_result (Tensor): The final prediction mask results with shape
[N, h, w] in binary mask style.
"""
num_mask = mask_out.shape[0]
origin_shape = paddle.cast(origin_shape, 'int32')
# TODO: support bs > 1 and mask output dtype is bool
pred_result = paddle.zeros(
[num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')
if bbox_num == 1 and bboxes[0][0] == -1:
return pred_result
# TODO: optimize chunk paste
pred_result = []
for i in range(bboxes.shape[0]):
im_h, im_w = origin_shape[i][0], origin_shape[i][1]
pred_mask = self.paste_mask(mask_out[i], bboxes[i:i + 1, 2:], im_h,
im_w)
pred_mask = pred_mask >= self.binary_thresh
pred_mask = paddle.cast(pred_mask, 'int32')
pred_result.append(pred_mask)
pred_result = paddle.concat(pred_result)
return pred_result
@register
class FCOSPostProcess(object):
__inject__ = ['decode', 'nms']
def __init__(self, decode=None, nms=None):
super(FCOSPostProcess, self).__init__()
self.decode = decode
self.nms = nms
def __call__(self, fcos_head_outs, scale_factor):
"""
Decode the bbox and do NMS in FCOS.
"""
locations, cls_logits, bboxes_reg, centerness = fcos_head_outs
bboxes, score = self.decode(locations, cls_logits, bboxes_reg,
centerness, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score)
return bbox_pred, bbox_num
@register
class S2ANetBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['nms']
def __init__(self, num_classes=15, nms_pre=2000, min_bbox_size=0, nms=None):
super(S2ANetBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.nms_pre = paddle.to_tensor(nms_pre)
self.min_bbox_size = min_bbox_size
self.nms = nms
self.origin_shape_list = []
self.fake_pred_cls_score_bbox = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, pred_scores, pred_bboxes):
"""
pred_scores : [N, M] score
pred_bboxes : [N, 5] xc, yc, w, h, a
im_shape : [N, 2] im_shape
scale_factor : [N, 2] scale_factor
"""
pred_ploys0 = rbox2poly(pred_bboxes)
pred_ploys = paddle.unsqueeze(pred_ploys0, axis=0)
# pred_scores [NA, 16] --> [16, NA]
pred_scores0 = paddle.transpose(pred_scores, [1, 0])
pred_scores = paddle.unsqueeze(pred_scores0, axis=0)
pred_cls_score_bbox, bbox_num, _ = self.nms(pred_ploys, pred_scores,
self.num_classes)
# Prevent empty bbox_pred from decode or NMS.
# Bboxes and score before NMS may be empty due to the score threshold.
if pred_cls_score_bbox.shape[0] <= 0 or pred_cls_score_bbox.shape[
1] <= 1:
pred_cls_score_bbox = self.fake_pred_cls_score_bbox
bbox_num = self.fake_bbox_num
pred_cls_score_bbox = paddle.reshape(pred_cls_score_bbox, [-1, 10])
return pred_cls_score_bbox, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Args:
bboxes(Tensor): bboxes [N, 10]
bbox_num(Tensor): bbox_num
im_shape(Tensor): [1 2]
scale_factor(Tensor): [1 2]
Returns:
bbox_pred(Tensor): The output is the prediction with shape [N, 8]
including labels, scores and bboxes. The sizes of the
bboxes correspond to the original image.
"""
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([
scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x,
scale_y
])
expand_scale = paddle.expand(scale, [bbox_num[i], 8])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 10], label, score, bbox
pred_label_score = bboxes[:, 0:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
pred_bbox = pred_bbox.reshape([-1, 8])
scaled_bbox = pred_bbox / scale_factor_list
origin_h = origin_shape_list[:, 0]
origin_w = origin_shape_list[:, 1]
bboxes = scaled_bbox
zeros = paddle.zeros_like(origin_h)
x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros)
y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros)
x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros)
y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros)
x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros)
y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros)
x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros)
y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1)
pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1)
return pred_result
@register
class JDEBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=1, decode=None, nms=None, return_idx=True):
super(JDEBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.return_idx = return_idx
self.fake_bbox_pred = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
self.fake_nms_keep_idx = paddle.to_tensor(
np.array(
[[0]], dtype='int32'))
self.fake_yolo_boxes_out = paddle.to_tensor(
np.array(
[[[0.0, 0.0, 0.0, 0.0]]], dtype='float32'))
self.fake_yolo_scores_out = paddle.to_tensor(
np.array(
[[[0.0]]], dtype='float32'))
self.fake_boxes_idx = paddle.to_tensor(np.array([[0]], dtype='int64'))
def forward(self, head_out, anchors):
"""
Decode the bbox and do NMS for JDE model.
Args:
head_out (list): Bbox_pred and cls_prob of bbox_head output.
anchors (list): Anchors of JDE model.
Returns:
boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'.
bbox_pred (Tensor): The output is the prediction with shape [N, 6]
including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction of each batch with shape [N].
nms_keep_idx (Tensor): The index of kept bboxes after NMS.
"""
boxes_idx, yolo_boxes_scores = self.decode(head_out, anchors)
if len(boxes_idx) == 0:
boxes_idx = self.fake_boxes_idx
yolo_boxes_out = self.fake_yolo_boxes_out
yolo_scores_out = self.fake_yolo_scores_out
else:
yolo_boxes = paddle.gather_nd(yolo_boxes_scores, boxes_idx)
# TODO: only support bs=1 now
yolo_boxes_out = paddle.reshape(
yolo_boxes[:, :4], shape=[1, len(boxes_idx), 4])
yolo_scores_out = paddle.reshape(
yolo_boxes[:, 4:5], shape=[1, 1, len(boxes_idx)])
boxes_idx = boxes_idx[:, 1:]
if self.return_idx:
bbox_pred, bbox_num, nms_keep_idx = self.nms(
yolo_boxes_out, yolo_scores_out, self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
nms_keep_idx = self.fake_nms_keep_idx
return boxes_idx, bbox_pred, bbox_num, nms_keep_idx
else:
bbox_pred, bbox_num, _ = self.nms(yolo_boxes_out, yolo_scores_out,
self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
return _, bbox_pred, bbox_num, _
@register
class CenterNetPostProcess(TTFBox):
"""
Postprocess the model outputs to get final prediction:
1. Do NMS for heatmap to get top `max_per_img` bboxes.
2. Decode bboxes using center offset and box size.
3. Rescale decoded bboxes reference to the origin image shape.
Args:
max_per_img(int): the maximum number of predicted objects in an image,
500 by default.
down_ratio(int): the down ratio from images to heatmap, 4 by default.
regress_ltrb (bool): whether to regress left/top/right/bottom or
width/height for a box, true by default.
for_mot (bool): whether to return other features used in the tracking model.
"""
__shared__ = ['down_ratio', 'for_mot']
def __init__(self,
max_per_img=500,
down_ratio=4,
regress_ltrb=True,
for_mot=False):
super(TTFBox, self).__init__()
self.max_per_img = max_per_img
self.down_ratio = down_ratio
self.regress_ltrb = regress_ltrb
self.for_mot = for_mot
def __call__(self, hm, wh, reg, im_shape, scale_factor):
heat = self._simple_nms(hm)
scores, inds, topk_clses, ys, xs = self._topk(heat)
scores = paddle.tensor.unsqueeze(scores, [1])
clses = paddle.tensor.unsqueeze(topk_clses, [1])
reg_t = paddle.transpose(reg, [0, 2, 3, 1])
# Like TTFBox, batch size is 1.
# TODO: support batch size > 1
reg = paddle.reshape(reg_t, [-1, paddle.shape(reg_t)[-1]])
reg = paddle.gather(reg, inds)
xs = paddle.cast(xs, 'float32')
ys = paddle.cast(ys, 'float32')
xs = xs + reg[:, 0:1]
ys = ys + reg[:, 1:2]
wh_t = paddle.transpose(wh, [0, 2, 3, 1])
wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
wh = paddle.gather(wh, inds)
if self.regress_ltrb:
x1 = xs - wh[:, 0:1]
y1 = ys - wh[:, 1:2]
x2 = xs + wh[:, 2:3]
y2 = ys + wh[:, 3:4]
else:
x1 = xs - wh[:, 0:1] / 2
y1 = ys - wh[:, 1:2] / 2
x2 = xs + wh[:, 0:1] / 2
y2 = ys + wh[:, 1:2] / 2
n, c, feat_h, feat_w = hm.shape[:]
padw = (feat_w * self.down_ratio - im_shape[0, 1]) / 2
padh = (feat_h * self.down_ratio - im_shape[0, 0]) / 2
x1 = x1 * self.down_ratio
y1 = y1 * self.down_ratio
x2 = x2 * self.down_ratio
y2 = y2 * self.down_ratio
x1 = x1 - padw
y1 = y1 - padh
x2 = x2 - padw
y2 = y2 - padh
bboxes = paddle.concat([x1, y1, x2, y2], axis=1)
scale_y = scale_factor[:, 0:1]
scale_x = scale_factor[:, 1:2]
scale_expand = paddle.concat(
[scale_x, scale_y, scale_x, scale_y], axis=1)
boxes_shape = paddle.shape(bboxes)
boxes_shape.stop_gradient = True
scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
bboxes = paddle.divide(bboxes, scale_expand)
if self.for_mot:
results = paddle.concat([bboxes, scores, clses], axis=1)
return results, inds, topk_clses
else:
results = paddle.concat([clses, scores, bboxes], axis=1)
return results, paddle.shape(results)[0:1], topk_clses
@register
class DETRBBoxPostProcess(object):
__shared__ = ['num_classes', 'use_focal_loss']
__inject__ = []
def __init__(self,
num_classes=80,
num_top_queries=100,
use_focal_loss=False):
super(DETRBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.num_top_queries = num_top_queries
self.use_focal_loss = use_focal_loss
def __call__(self, head_out, im_shape, scale_factor):
"""
Decode the bbox.
Args:
head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
labels, scores and bboxes. The sizes of the bboxes correspond
to the input image, and the bboxes may be used in other branches.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [bs], and is N.
"""
bboxes, logits, masks = head_out
bbox_pred = bbox_cxcywh_to_xyxy(bboxes)
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
img_h, img_w = origin_shape.unbind(1)
origin_shape = paddle.stack(
[img_w, img_h, img_w, img_h], axis=-1).unsqueeze(0)
bbox_pred *= origin_shape
scores = F.sigmoid(logits) if self.use_focal_loss else F.softmax(
logits)[:, :, :-1]
if not self.use_focal_loss:
scores, labels = scores.max(-1), scores.argmax(-1)
if scores.shape[1] > self.num_top_queries:
scores, index = paddle.topk(
scores, self.num_top_queries, axis=-1)
labels = paddle.stack(
[paddle.gather(l, i) for l, i in zip(labels, index)])
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
else:
scores, index = paddle.topk(
scores.reshape([logits.shape[0], -1]),
self.num_top_queries,
axis=-1)
labels = index % logits.shape[2]
index = index // logits.shape[2]
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
bbox_pred = paddle.concat(
[
labels.unsqueeze(-1).astype('float32'), scores.unsqueeze(-1),
bbox_pred
],
axis=-1)
bbox_num = paddle.to_tensor(
bbox_pred.shape[1], dtype='int32').tile([bbox_pred.shape[0]])
bbox_pred = bbox_pred.reshape([-1, 6])
return bbox_pred, bbox_num
@register
class SparsePostProcess(object):
__shared__ = ['num_classes']
def __init__(self, num_proposals, num_classes=80):
super(SparsePostProcess, self).__init__()
self.num_classes = num_classes
self.num_proposals = num_proposals
def __call__(self, box_cls, box_pred, scale_factor_wh, img_whwh):
"""
Arguments:
box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).
The tensor predicts the classification probability for each proposal.
box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).
The tensor predicts 4-vector (x,y,w,h) box
regression values for every proposal
scale_factor_wh (Tensor): tensor of shape [batch_size, 2], the scale factor of each image
img_whwh (Tensor): tensors of shape [batch_size, 4]
Returns:
bbox_pred (Tensor): tensors of shape [num_boxes, 6] Each row has 6 values:
[label, confidence, xmin, ymin, xmax, ymax]
bbox_num (Tensor): tensors of shape [batch_size] the number of RoIs in each image.
"""
assert len(box_cls) == len(scale_factor_wh) == len(img_whwh)
img_wh = img_whwh[:, :2]
scores = F.sigmoid(box_cls)
labels = paddle.arange(0, self.num_classes). \
unsqueeze(0).tile([self.num_proposals, 1]).flatten(start_axis=0, stop_axis=1)
classes_all = []
scores_all = []
boxes_all = []
for i, (scores_per_image,
box_pred_per_image) in enumerate(zip(scores, box_pred)):
scores_per_image, topk_indices = scores_per_image.flatten(
0, 1).topk(
self.num_proposals, sorted=False)
labels_per_image = paddle.gather(labels, topk_indices, axis=0)
box_pred_per_image = box_pred_per_image.reshape([-1, 1, 4]).tile(
[1, self.num_classes, 1]).reshape([-1, 4])
box_pred_per_image = paddle.gather(
box_pred_per_image, topk_indices, axis=0)
classes_all.append(labels_per_image)
scores_all.append(scores_per_image)
boxes_all.append(box_pred_per_image)
bbox_num = paddle.zeros([len(scale_factor_wh)], dtype="int32")
boxes_final = []
for i in range(len(scale_factor_wh)):
classes = classes_all[i]
boxes = boxes_all[i]
scores = scores_all[i]
boxes[:, 0::2] = paddle.clip(
boxes[:, 0::2], min=0, max=img_wh[i][0]) / scale_factor_wh[i][0]
boxes[:, 1::2] = paddle.clip(
boxes[:, 1::2], min=0, max=img_wh[i][1]) / scale_factor_wh[i][1]
boxes_w, boxes_h = (boxes[:, 2] - boxes[:, 0]).numpy(), (
boxes[:, 3] - boxes[:, 1]).numpy()
keep = (boxes_w > 1.) & (boxes_h > 1.)
if (keep.sum() == 0):
bboxes = paddle.zeros([1, 6]).astype("float32")
else:
boxes = paddle.to_tensor(boxes.numpy()[keep]).astype("float32")
classes = paddle.to_tensor(classes.numpy()[keep]).astype(
"float32").unsqueeze(-1)
scores = paddle.to_tensor(scores.numpy()[keep]).astype(
"float32").unsqueeze(-1)
bboxes = paddle.concat([classes, scores, boxes], axis=-1)
boxes_final.append(bboxes)
bbox_num[i] = bboxes.shape[0]
bbox_pred = paddle.concat(boxes_final)
return bbox_pred, bbox_num
def nms(dets, thresh):
"""Apply classic DPM-style greedy NMS."""
if dets.shape[0] == 0:
return dets[[], :]
scores = dets[:, 0]
x1 = dets[:, 1]
y1 = dets[:, 2]
x2 = dets[:, 3]
y2 = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
ndets = dets.shape[0]
suppressed = np.zeros((ndets), dtype=int)  # np.int was removed in NumPy 1.24; the builtin int is equivalent here
# nominal indices
# _i, _j
# sorted indices
# i, j
# temp variables for box i's (the box currently under consideration)
# ix1, iy1, ix2, iy2, iarea
# variables for computing overlap with box j (lower scoring box)
# xx1, yy1, xx2, yy2
# w, h
# inter, ovr
for _i in range(ndets):
i = order[_i]
if suppressed[i] == 1:
continue
ix1 = x1[i]
iy1 = y1[i]
ix2 = x2[i]
iy2 = y2[i]
iarea = areas[i]
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
xx1 = max(ix1, x1[j])
yy1 = max(iy1, y1[j])
xx2 = min(ix2, x2[j])
yy2 = min(iy2, y2[j])
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (iarea + areas[j] - inter)
if ovr >= thresh:
suppressed[j] = 1
keep = np.where(suppressed == 0)[0]
dets = dets[keep, :]
return dets
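# --- Editor's sketch (illustration only) -----------------------------------
# A tiny numeric walk-through of the greedy nms() helper above, assuming the
# function is available in the current scope. Rows are [score, x1, y1, x2, y2].
# Box B overlaps the higher-scoring box A with IoU = 100 / 121 ~= 0.83, so it
# is suppressed at a 0.5 threshold, while the disjoint box C survives.
import numpy as np

dets = np.array([
    [0.9,  0.0,  0.0, 10.0, 10.0],   # A: kept (highest score)
    [0.8,  1.0,  1.0, 10.0, 10.0],   # B: suppressed by A (IoU ~ 0.83)
    [0.7, 20.0, 20.0, 30.0, 30.0],   # C: kept (no overlap with A)
])
kept = nms(dets, thresh=0.5)
assert kept.shape[0] == 2 and kept[0, 0] == 0.9 and kept[1, 0] == 0.7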
|
def forward(self, head_out, rois, im_shape, scale_factor):
"""
Decode the bbox and do NMS if needed.
Args:
head_out (tuple): bbox_pred and cls_prob of bbox_head output.
rois (tuple): roi and rois_num of rpn_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
labels, scores and bboxes. The sizes of the bboxes correspond
to the input image, and the bboxes may be used in other branches.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
"""
if self.nms is not None:
bboxes, score = self.decode(head_out, rois, im_shape, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score, self.num_classes)
else:
bbox_pred, bbox_num = self.decode(head_out, rois, im_shape,
scale_factor)
return bbox_pred, bbox_num
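# --- Editor's sketch (illustration only) -----------------------------------
# forward() above returns boxes in network-input coordinates; get_pred() then
# recovers the pre-resize image size with floor(im_shape / scale_factor + 0.5),
# divides the boxes by [scale_x, scale_y, scale_x, scale_y] and clips them to
# that size. The NumPy lines below mirror that arithmetic for one box; the
# numbers are made up purely for illustration.
import numpy as np

im_shape = np.array([800.0, 608.0])            # network input size: [h, w]
scale_factor = np.array([2.0, 2.0])            # [scale_y, scale_x]
origin_h, origin_w = np.floor(im_shape / scale_factor + 0.5)   # 400.0, 304.0
box = np.array([100.0, 50.0, 900.0, 300.0])    # [x1, y1, x2, y2] in input space
scaled = box / np.array([scale_factor[1], scale_factor[0],
                         scale_factor[1], scale_factor[0]])
clipped = np.clip(scaled, 0.0, [origin_w, origin_h, origin_w, origin_h])
assert clipped.tolist() == [50.0, 25.0, 304.0, 150.0]          # x2 clipped to the width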
| 50 | 72 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ppdet.modeling.bbox_utils import nonempty_bbox, rbox2poly
from ppdet.modeling.layers import TTFBox
from .transformers import bbox_cxcywh_to_xyxy
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
__all__ = [
'BBoxPostProcess', 'MaskPostProcess', 'FCOSPostProcess',
'S2ANetBBoxPostProcess', 'JDEBBoxPostProcess', 'CenterNetPostProcess',
'DETRBBoxPostProcess', 'SparsePostProcess'
]
@register
class BBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=80, decode=None, nms=None):
super(BBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.fake_bboxes = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, head_out, rois, im_shape, scale_factor):
"""
Decode the bbox and do NMS if needed.
Args:
head_out (tuple): bbox_pred and cls_prob of bbox_head output.
rois (tuple): roi and rois_num of rpn_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
labels, scores and bboxes. The sizes of the bboxes correspond
to the input image, and the bboxes may be used in other branches.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
"""
if self.nms is not None:
bboxes, score = self.decode(head_out, rois, im_shape, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score, self.num_classes)
else:
bbox_pred, bbox_num = self.decode(head_out, rois, im_shape,
scale_factor)
return bbox_pred, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Notes:
Currently only support bs = 1.
Args:
bboxes (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
pred_result (Tensor): The final prediction results with shape [N, 6]
including labels, scores and bboxes.
"""
if bboxes.shape[0] == 0:
bboxes = self.fake_bboxes
bbox_num = self.fake_bbox_num
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([scale_x, scale_y, scale_x, scale_y])
expand_scale = paddle.expand(scale, [bbox_num[i], 4])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
self.origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 6], label, score, bbox
pred_label = bboxes[:, 0:1]
pred_score = bboxes[:, 1:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
scaled_bbox = pred_bbox / scale_factor_list
origin_h = self.origin_shape_list[:, 0]
origin_w = self.origin_shape_list[:, 1]
zeros = paddle.zeros_like(origin_h)
# clip bbox to [0, original_size]
x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros)
y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros)
x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros)
y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1)
# filter empty bbox
keep_mask = nonempty_bbox(pred_bbox, return_mask=True)
keep_mask = paddle.unsqueeze(keep_mask, [1])
pred_label = paddle.where(keep_mask, pred_label,
paddle.ones_like(pred_label) * -1)
pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1)
return pred_result
def get_origin_shape(self, ):
return self.origin_shape_list
@register
class MaskPostProcess(object):
"""
refer to:
https://github.com/facebookresearch/detectron2/layers/mask_ops.py
Get Mask output according to the output from model
"""
def __init__(self, binary_thresh=0.5):
super(MaskPostProcess, self).__init__()
self.binary_thresh = binary_thresh
def paste_mask(self, masks, boxes, im_h, im_w):
"""
Paste the mask prediction to the original image.
"""
x0, y0, x1, y1 = paddle.split(boxes, 4, axis=1)
masks = paddle.unsqueeze(masks, [0, 1])
img_y = paddle.arange(0, im_h, dtype='float32') + 0.5
img_x = paddle.arange(0, im_w, dtype='float32') + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
img_x = paddle.unsqueeze(img_x, [1])
img_y = paddle.unsqueeze(img_y, [2])
N = boxes.shape[0]
gx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]])
gy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]])
grid = paddle.stack([gx, gy], axis=3)
img_masks = F.grid_sample(masks, grid, align_corners=False)
return img_masks[:, 0]
def __call__(self, mask_out, bboxes, bbox_num, origin_shape):
"""
Decode the mask_out and paste the mask to the origin image.
Args:
mask_out (Tensor): mask_head output with shape [N, 28, 28].
bbox_pred (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
origin_shape (Tensor): The origin shape of the input image, the tensor
shape is [N, 2], and each row is [h, w].
Returns:
pred_result (Tensor): The final prediction mask results with shape
[N, h, w] in binary mask style.
"""
num_mask = mask_out.shape[0]
origin_shape = paddle.cast(origin_shape, 'int32')
# TODO: support bs > 1 and mask output dtype is bool
pred_result = paddle.zeros(
[num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')
if bbox_num == 1 and bboxes[0][0] == -1:
return pred_result
# TODO: optimize chunk paste
pred_result = []
for i in range(bboxes.shape[0]):
im_h, im_w = origin_shape[i][0], origin_shape[i][1]
pred_mask = self.paste_mask(mask_out[i], bboxes[i:i + 1, 2:], im_h,
im_w)
pred_mask = pred_mask >= self.binary_thresh
pred_mask = paddle.cast(pred_mask, 'int32')
pred_result.append(pred_mask)
pred_result = paddle.concat(pred_result)
return pred_result
@register
class FCOSPostProcess(object):
__inject__ = ['decode', 'nms']
def __init__(self, decode=None, nms=None):
super(FCOSPostProcess, self).__init__()
self.decode = decode
self.nms = nms
def __call__(self, fcos_head_outs, scale_factor):
"""
Decode the bbox and do NMS in FCOS.
"""
locations, cls_logits, bboxes_reg, centerness = fcos_head_outs
bboxes, score = self.decode(locations, cls_logits, bboxes_reg,
centerness, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score)
return bbox_pred, bbox_num
@register
class S2ANetBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['nms']
def __init__(self, num_classes=15, nms_pre=2000, min_bbox_size=0, nms=None):
super(S2ANetBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.nms_pre = paddle.to_tensor(nms_pre)
self.min_bbox_size = min_bbox_size
self.nms = nms
self.origin_shape_list = []
self.fake_pred_cls_score_bbox = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, pred_scores, pred_bboxes):
"""
pred_scores : [N, M] score
pred_bboxes : [N, 5] xc, yc, w, h, a
im_shape : [N, 2] im_shape
scale_factor : [N, 2] scale_factor
"""
pred_ploys0 = rbox2poly(pred_bboxes)
pred_ploys = paddle.unsqueeze(pred_ploys0, axis=0)
# pred_scores [NA, 16] --> [16, NA]
pred_scores0 = paddle.transpose(pred_scores, [1, 0])
pred_scores = paddle.unsqueeze(pred_scores0, axis=0)
pred_cls_score_bbox, bbox_num, _ = self.nms(pred_ploys, pred_scores,
self.num_classes)
# Prevent empty bbox_pred from decode or NMS.
# Bboxes and score before NMS may be empty due to the score threshold.
if pred_cls_score_bbox.shape[0] <= 0 or pred_cls_score_bbox.shape[
1] <= 1:
pred_cls_score_bbox = self.fake_pred_cls_score_bbox
bbox_num = self.fake_bbox_num
pred_cls_score_bbox = paddle.reshape(pred_cls_score_bbox, [-1, 10])
return pred_cls_score_bbox, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Args:
bboxes(Tensor): bboxes [N, 10]
bbox_num(Tensor): bbox_num
im_shape(Tensor): [1 2]
scale_factor(Tensor): [1 2]
Returns:
bbox_pred(Tensor): The output is the prediction with shape [N, 8]
including labels, scores and bboxes. The sizes of the
bboxes correspond to the original image.
"""
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([
scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x,
scale_y
])
expand_scale = paddle.expand(scale, [bbox_num[i], 8])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 10], label, score, bbox
pred_label_score = bboxes[:, 0:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
pred_bbox = pred_bbox.reshape([-1, 8])
scaled_bbox = pred_bbox / scale_factor_list
origin_h = origin_shape_list[:, 0]
origin_w = origin_shape_list[:, 1]
bboxes = scaled_bbox
zeros = paddle.zeros_like(origin_h)
x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros)
y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros)
x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros)
y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros)
x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros)
y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros)
x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros)
y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1)
pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1)
return pred_result
@register
class JDEBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=1, decode=None, nms=None, return_idx=True):
super(JDEBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.return_idx = return_idx
self.fake_bbox_pred = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
self.fake_nms_keep_idx = paddle.to_tensor(
np.array(
[[0]], dtype='int32'))
self.fake_yolo_boxes_out = paddle.to_tensor(
np.array(
[[[0.0, 0.0, 0.0, 0.0]]], dtype='float32'))
self.fake_yolo_scores_out = paddle.to_tensor(
np.array(
[[[0.0]]], dtype='float32'))
self.fake_boxes_idx = paddle.to_tensor(np.array([[0]], dtype='int64'))
def forward(self, head_out, anchors):
"""
Decode the bbox and do NMS for JDE model.
Args:
head_out (list): Bbox_pred and cls_prob of bbox_head output.
anchors (list): Anchors of JDE model.
Returns:
boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'.
bbox_pred (Tensor): The output is the prediction with shape [N, 6]
including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction of each batch with shape [N].
nms_keep_idx (Tensor): The index of kept bboxes after NMS.
"""
boxes_idx, yolo_boxes_scores = self.decode(head_out, anchors)
if len(boxes_idx) == 0:
boxes_idx = self.fake_boxes_idx
yolo_boxes_out = self.fake_yolo_boxes_out
yolo_scores_out = self.fake_yolo_scores_out
else:
yolo_boxes = paddle.gather_nd(yolo_boxes_scores, boxes_idx)
# TODO: only support bs=1 now
yolo_boxes_out = paddle.reshape(
yolo_boxes[:, :4], shape=[1, len(boxes_idx), 4])
yolo_scores_out = paddle.reshape(
yolo_boxes[:, 4:5], shape=[1, 1, len(boxes_idx)])
boxes_idx = boxes_idx[:, 1:]
if self.return_idx:
bbox_pred, bbox_num, nms_keep_idx = self.nms(
yolo_boxes_out, yolo_scores_out, self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
nms_keep_idx = self.fake_nms_keep_idx
return boxes_idx, bbox_pred, bbox_num, nms_keep_idx
else:
bbox_pred, bbox_num, _ = self.nms(yolo_boxes_out, yolo_scores_out,
self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
return _, bbox_pred, bbox_num, _
@register
class CenterNetPostProcess(TTFBox):
"""
Postprocess the model outputs to get final prediction:
1. Do NMS for heatmap to get top `max_per_img` bboxes.
2. Decode bboxes using center offset and box size.
3. Rescale decoded bboxes reference to the origin image shape.
Args:
max_per_img(int): the maximum number of predicted objects in an image,
500 by default.
down_ratio(int): the down ratio from images to heatmap, 4 by default.
regress_ltrb (bool): whether to regress left/top/right/bottom or
width/height for a box, true by default.
for_mot (bool): whether to return other features used in the tracking model.
"""
__shared__ = ['down_ratio', 'for_mot']
def __init__(self,
max_per_img=500,
down_ratio=4,
regress_ltrb=True,
for_mot=False):
super(TTFBox, self).__init__()
self.max_per_img = max_per_img
self.down_ratio = down_ratio
self.regress_ltrb = regress_ltrb
self.for_mot = for_mot
def __call__(self, hm, wh, reg, im_shape, scale_factor):
heat = self._simple_nms(hm)
scores, inds, topk_clses, ys, xs = self._topk(heat)
scores = paddle.tensor.unsqueeze(scores, [1])
clses = paddle.tensor.unsqueeze(topk_clses, [1])
reg_t = paddle.transpose(reg, [0, 2, 3, 1])
# Like TTFBox, batch size is 1.
# TODO: support batch size > 1
reg = paddle.reshape(reg_t, [-1, paddle.shape(reg_t)[-1]])
reg = paddle.gather(reg, inds)
xs = paddle.cast(xs, 'float32')
ys = paddle.cast(ys, 'float32')
xs = xs + reg[:, 0:1]
ys = ys + reg[:, 1:2]
wh_t = paddle.transpose(wh, [0, 2, 3, 1])
wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
wh = paddle.gather(wh, inds)
if self.regress_ltrb:
x1 = xs - wh[:, 0:1]
y1 = ys - wh[:, 1:2]
x2 = xs + wh[:, 2:3]
y2 = ys + wh[:, 3:4]
else:
x1 = xs - wh[:, 0:1] / 2
y1 = ys - wh[:, 1:2] / 2
x2 = xs + wh[:, 0:1] / 2
y2 = ys + wh[:, 1:2] / 2
n, c, feat_h, feat_w = hm.shape[:]
padw = (feat_w * self.down_ratio - im_shape[0, 1]) / 2
padh = (feat_h * self.down_ratio - im_shape[0, 0]) / 2
x1 = x1 * self.down_ratio
y1 = y1 * self.down_ratio
x2 = x2 * self.down_ratio
y2 = y2 * self.down_ratio
x1 = x1 - padw
y1 = y1 - padh
x2 = x2 - padw
y2 = y2 - padh
bboxes = paddle.concat([x1, y1, x2, y2], axis=1)
scale_y = scale_factor[:, 0:1]
scale_x = scale_factor[:, 1:2]
scale_expand = paddle.concat(
[scale_x, scale_y, scale_x, scale_y], axis=1)
boxes_shape = paddle.shape(bboxes)
boxes_shape.stop_gradient = True
scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
bboxes = paddle.divide(bboxes, scale_expand)
if self.for_mot:
results = paddle.concat([bboxes, scores, clses], axis=1)
return results, inds, topk_clses
else:
results = paddle.concat([clses, scores, bboxes], axis=1)
return results, paddle.shape(results)[0:1], topk_clses
@register
class DETRBBoxPostProcess(object):
__shared__ = ['num_classes', 'use_focal_loss']
__inject__ = []
def __init__(self,
num_classes=80,
num_top_queries=100,
use_focal_loss=False):
super(DETRBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.num_top_queries = num_top_queries
self.use_focal_loss = use_focal_loss
def __call__(self, head_out, im_shape, scale_factor):
"""
Decode the bbox.
Args:
head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
labels, scores and bboxes. The sizes of the bboxes correspond
to the input image, and the bboxes may be used in other branches.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [bs], and is N.
"""
bboxes, logits, masks = head_out
bbox_pred = bbox_cxcywh_to_xyxy(bboxes)
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
img_h, img_w = origin_shape.unbind(1)
origin_shape = paddle.stack(
[img_w, img_h, img_w, img_h], axis=-1).unsqueeze(0)
bbox_pred *= origin_shape
scores = F.sigmoid(logits) if self.use_focal_loss else F.softmax(
logits)[:, :, :-1]
if not self.use_focal_loss:
scores, labels = scores.max(-1), scores.argmax(-1)
if scores.shape[1] > self.num_top_queries:
scores, index = paddle.topk(
scores, self.num_top_queries, axis=-1)
labels = paddle.stack(
[paddle.gather(l, i) for l, i in zip(labels, index)])
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
else:
scores, index = paddle.topk(
scores.reshape([logits.shape[0], -1]),
self.num_top_queries,
axis=-1)
labels = index % logits.shape[2]
index = index // logits.shape[2]
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
bbox_pred = paddle.concat(
[
labels.unsqueeze(-1).astype('float32'), scores.unsqueeze(-1),
bbox_pred
],
axis=-1)
bbox_num = paddle.to_tensor(
bbox_pred.shape[1], dtype='int32').tile([bbox_pred.shape[0]])
bbox_pred = bbox_pred.reshape([-1, 6])
return bbox_pred, bbox_num
@register
class SparsePostProcess(object):
__shared__ = ['num_classes']
def __init__(self, num_proposals, num_classes=80):
super(SparsePostProcess, self).__init__()
self.num_classes = num_classes
self.num_proposals = num_proposals
def __call__(self, box_cls, box_pred, scale_factor_wh, img_whwh):
"""
Arguments:
box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).
The tensor predicts the classification probability for each proposal.
box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).
The tensor predicts 4-vector (x,y,w,h) box
regression values for every proposal
scale_factor_wh (Tensor): tensor of shape [batch_size, 2], the scale factor of each image
img_whwh (Tensor): tensors of shape [batch_size, 4]
Returns:
bbox_pred (Tensor): tensors of shape [num_boxes, 6] Each row has 6 values:
[label, confidence, xmin, ymin, xmax, ymax]
bbox_num (Tensor): tensors of shape [batch_size] the number of RoIs in each image.
"""
assert len(box_cls) == len(scale_factor_wh) == len(img_whwh)
img_wh = img_whwh[:, :2]
scores = F.sigmoid(box_cls)
labels = paddle.arange(0, self.num_classes). \
unsqueeze(0).tile([self.num_proposals, 1]).flatten(start_axis=0, stop_axis=1)
classes_all = []
scores_all = []
boxes_all = []
for i, (scores_per_image,
box_pred_per_image) in enumerate(zip(scores, box_pred)):
scores_per_image, topk_indices = scores_per_image.flatten(
0, 1).topk(
self.num_proposals, sorted=False)
labels_per_image = paddle.gather(labels, topk_indices, axis=0)
box_pred_per_image = box_pred_per_image.reshape([-1, 1, 4]).tile(
[1, self.num_classes, 1]).reshape([-1, 4])
box_pred_per_image = paddle.gather(
box_pred_per_image, topk_indices, axis=0)
classes_all.append(labels_per_image)
scores_all.append(scores_per_image)
boxes_all.append(box_pred_per_image)
bbox_num = paddle.zeros([len(scale_factor_wh)], dtype="int32")
boxes_final = []
for i in range(len(scale_factor_wh)):
classes = classes_all[i]
boxes = boxes_all[i]
scores = scores_all[i]
boxes[:, 0::2] = paddle.clip(
boxes[:, 0::2], min=0, max=img_wh[i][0]) / scale_factor_wh[i][0]
boxes[:, 1::2] = paddle.clip(
boxes[:, 1::2], min=0, max=img_wh[i][1]) / scale_factor_wh[i][1]
boxes_w, boxes_h = (boxes[:, 2] - boxes[:, 0]).numpy(), (
boxes[:, 3] - boxes[:, 1]).numpy()
keep = (boxes_w > 1.) & (boxes_h > 1.)
if (keep.sum() == 0):
bboxes = paddle.zeros([1, 6]).astype("float32")
else:
boxes = paddle.to_tensor(boxes.numpy()[keep]).astype("float32")
classes = paddle.to_tensor(classes.numpy()[keep]).astype(
"float32").unsqueeze(-1)
scores = paddle.to_tensor(scores.numpy()[keep]).astype(
"float32").unsqueeze(-1)
bboxes = paddle.concat([classes, scores, boxes], axis=-1)
boxes_final.append(bboxes)
bbox_num[i] = bboxes.shape[0]
bbox_pred = paddle.concat(boxes_final)
return bbox_pred, bbox_num
def nms(dets, thresh):
"""Apply classic DPM-style greedy NMS."""
if dets.shape[0] == 0:
return dets[[], :]
scores = dets[:, 0]
x1 = dets[:, 1]
y1 = dets[:, 2]
x2 = dets[:, 3]
y2 = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
ndets = dets.shape[0]
suppressed = np.zeros((ndets), dtype=int)  # np.int was removed in NumPy 1.24; the builtin int is equivalent here
# nominal indices
# _i, _j
# sorted indices
# i, j
# temp variables for box i's (the box currently under consideration)
# ix1, iy1, ix2, iy2, iarea
# variables for computing overlap with box j (lower scoring box)
# xx1, yy1, xx2, yy2
# w, h
# inter, ovr
for _i in range(ndets):
i = order[_i]
if suppressed[i] == 1:
continue
ix1 = x1[i]
iy1 = y1[i]
ix2 = x2[i]
iy2 = y2[i]
iarea = areas[i]
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
xx1 = max(ix1, x1[j])
yy1 = max(iy1, y1[j])
xx2 = min(ix2, x2[j])
yy2 = min(iy2, y2[j])
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (iarea + areas[j] - inter)
if ovr >= thresh:
suppressed[j] = 1
keep = np.where(suppressed == 0)[0]
dets = dets[keep, :]
return dets
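# --- Editor's sketch (illustration only) -----------------------------------
# In DETRBBoxPostProcess above, the use_focal_loss branch flattens the per-image
# scores of shape [bs, num_queries, num_classes] before taking the top-k, so a
# flat index encodes both the query and the class: label = index % num_classes
# and query = index // num_classes. The NumPy lines below replay that index
# decomposition on a toy tensor; the sizes are illustrative only.
import numpy as np

num_queries, num_classes, k = 4, 3, 2
scores = np.arange(num_queries * num_classes, dtype=np.float32).reshape(1, num_queries, num_classes)
flat = scores.reshape(1, -1)                  # [bs, num_queries * num_classes]
index = np.argsort(-flat, axis=-1)[:, :k]     # top-k flat indices (here 11 and 10)
labels = index % num_classes                  # class id of each selected score
queries = index // num_classes                # query that produced it
assert labels.tolist() == [[2, 1]] and queries.tolist() == [[3, 3]]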
|
__call__
|
Decode the mask_out and paste the mask to the origin image.
Args:
mask_out (Tensor): mask_head output with shape [N, 28, 28].
bbox_pred (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
origin_shape (Tensor): The origin shape of the input image, the tensor
shape is [N, 2], and each row is [h, w].
Returns:
pred_result (Tensor): The final prediction mask results with shape
[N, h, w] in binary mask style.
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ppdet.modeling.bbox_utils import nonempty_bbox, rbox2poly
from ppdet.modeling.layers import TTFBox
from .transformers import bbox_cxcywh_to_xyxy
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
__all__ = [
'BBoxPostProcess', 'MaskPostProcess', 'FCOSPostProcess',
'S2ANetBBoxPostProcess', 'JDEBBoxPostProcess', 'CenterNetPostProcess',
'DETRBBoxPostProcess', 'SparsePostProcess'
]
@register
class BBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=80, decode=None, nms=None):
super(BBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.fake_bboxes = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, head_out, rois, im_shape, scale_factor):
"""
Decode the bbox and do NMS if needed.
Args:
head_out (tuple): bbox_pred and cls_prob of bbox_head output.
rois (tuple): roi and rois_num of rpn_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
labels, scores and bboxes. The sizes of the bboxes correspond
to the input image, and the bboxes may be used in other branches.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
"""
if self.nms is not None:
bboxes, score = self.decode(head_out, rois, im_shape, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score, self.num_classes)
else:
bbox_pred, bbox_num = self.decode(head_out, rois, im_shape,
scale_factor)
return bbox_pred, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Notes:
Currently only support bs = 1.
Args:
bboxes (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
pred_result (Tensor): The final prediction results with shape [N, 6]
including labels, scores and bboxes.
"""
if bboxes.shape[0] == 0:
bboxes = self.fake_bboxes
bbox_num = self.fake_bbox_num
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
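        # e.g. a network input with im_shape = [800., 1216.] that was produced with
        # scale_factor = [2., 2.] recovers origin_shape = floor([400.5, 608.5]) = [400., 608.]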
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([scale_x, scale_y, scale_x, scale_y])
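            # the scale order matches the bbox layout [x1, y1, x2, y2],
            # i.e. [scale_x, scale_y, scale_x, scale_y]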
expand_scale = paddle.expand(scale, [bbox_num[i], 4])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
self.origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 6], label, score, bbox
pred_label = bboxes[:, 0:1]
pred_score = bboxes[:, 1:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
scaled_bbox = pred_bbox / scale_factor_list
origin_h = self.origin_shape_list[:, 0]
origin_w = self.origin_shape_list[:, 1]
zeros = paddle.zeros_like(origin_h)
# clip bbox to [0, original_size]
x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros)
y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros)
x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros)
y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1)
# filter empty bbox
keep_mask = nonempty_bbox(pred_bbox, return_mask=True)
keep_mask = paddle.unsqueeze(keep_mask, [1])
pred_label = paddle.where(keep_mask, pred_label,
paddle.ones_like(pred_label) * -1)
pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1)
return pred_result
def get_origin_shape(self, ):
return self.origin_shape_list
@register
class MaskPostProcess(object):
"""
refer to:
https://github.com/facebookresearch/detectron2/layers/mask_ops.py
Get Mask output according to the output from model
"""
def __init__(self, binary_thresh=0.5):
super(MaskPostProcess, self).__init__()
self.binary_thresh = binary_thresh
def paste_mask(self, masks, boxes, im_h, im_w):
"""
Paste the mask prediction to the original image.
"""
x0, y0, x1, y1 = paddle.split(boxes, 4, axis=1)
masks = paddle.unsqueeze(masks, [0, 1])
img_y = paddle.arange(0, im_h, dtype='float32') + 0.5
img_x = paddle.arange(0, im_w, dtype='float32') + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
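        # the affine map above rescales box-relative coordinates to grid_sample's [-1, 1]
        # range, e.g. with y0 = 10 and y1 = 20 a pixel centre at 15.5 maps to
        # (15.5 - 10) / 10 * 2 - 1 = 0.1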
img_x = paddle.unsqueeze(img_x, [1])
img_y = paddle.unsqueeze(img_y, [2])
N = boxes.shape[0]
gx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]])
gy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]])
grid = paddle.stack([gx, gy], axis=3)
img_masks = F.grid_sample(masks, grid, align_corners=False)
return img_masks[:, 0]
# MASKED: __call__ function (lines 175-209)
@register
class FCOSPostProcess(object):
__inject__ = ['decode', 'nms']
def __init__(self, decode=None, nms=None):
super(FCOSPostProcess, self).__init__()
self.decode = decode
self.nms = nms
def __call__(self, fcos_head_outs, scale_factor):
"""
Decode the bbox and do NMS in FCOS.
"""
locations, cls_logits, bboxes_reg, centerness = fcos_head_outs
bboxes, score = self.decode(locations, cls_logits, bboxes_reg,
centerness, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score)
return bbox_pred, bbox_num
@register
class S2ANetBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['nms']
def __init__(self, num_classes=15, nms_pre=2000, min_bbox_size=0, nms=None):
super(S2ANetBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.nms_pre = paddle.to_tensor(nms_pre)
self.min_bbox_size = min_bbox_size
self.nms = nms
self.origin_shape_list = []
self.fake_pred_cls_score_bbox = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, pred_scores, pred_bboxes):
"""
pred_scores : [N, M] score
pred_bboxes : [N, 5] xc, yc, w, h, a
im_shape : [N, 2] im_shape
scale_factor : [N, 2] scale_factor
"""
pred_ploys0 = rbox2poly(pred_bboxes)
pred_ploys = paddle.unsqueeze(pred_ploys0, axis=0)
# pred_scores [NA, 16] --> [16, NA]
pred_scores0 = paddle.transpose(pred_scores, [1, 0])
pred_scores = paddle.unsqueeze(pred_scores0, axis=0)
pred_cls_score_bbox, bbox_num, _ = self.nms(pred_ploys, pred_scores,
self.num_classes)
# Prevent empty bbox_pred from decode or NMS.
# Bboxes and score before NMS may be empty due to the score threshold.
if pred_cls_score_bbox.shape[0] <= 0 or pred_cls_score_bbox.shape[
1] <= 1:
pred_cls_score_bbox = self.fake_pred_cls_score_bbox
bbox_num = self.fake_bbox_num
pred_cls_score_bbox = paddle.reshape(pred_cls_score_bbox, [-1, 10])
return pred_cls_score_bbox, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Args:
bboxes(Tensor): bboxes [N, 10]
bbox_num(Tensor): bbox_num
im_shape(Tensor): [1 2]
scale_factor(Tensor): [1 2]
Returns:
bbox_pred(Tensor): The output is the prediction with shape [N, 8]
including labels, scores and bboxes. The size of
bboxes are corresponding to the original image.
"""
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([
scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x,
scale_y
])
expand_scale = paddle.expand(scale, [bbox_num[i], 8])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 10], label, score, bbox
pred_label_score = bboxes[:, 0:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
pred_bbox = pred_bbox.reshape([-1, 8])
scaled_bbox = pred_bbox / scale_factor_list
origin_h = origin_shape_list[:, 0]
origin_w = origin_shape_list[:, 1]
bboxes = scaled_bbox
zeros = paddle.zeros_like(origin_h)
x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros)
y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros)
x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros)
y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros)
x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros)
y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros)
x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros)
y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1)
pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1)
return pred_result
@register
class JDEBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=1, decode=None, nms=None, return_idx=True):
super(JDEBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.return_idx = return_idx
self.fake_bbox_pred = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
self.fake_nms_keep_idx = paddle.to_tensor(
np.array(
[[0]], dtype='int32'))
self.fake_yolo_boxes_out = paddle.to_tensor(
np.array(
[[[0.0, 0.0, 0.0, 0.0]]], dtype='float32'))
self.fake_yolo_scores_out = paddle.to_tensor(
np.array(
[[[0.0]]], dtype='float32'))
self.fake_boxes_idx = paddle.to_tensor(np.array([[0]], dtype='int64'))
def forward(self, head_out, anchors):
"""
Decode the bbox and do NMS for JDE model.
Args:
head_out (list): Bbox_pred and cls_prob of bbox_head output.
anchors (list): Anchors of JDE model.
Returns:
boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'.
bbox_pred (Tensor): The output is the prediction with shape [N, 6]
including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction of each batch with shape [N].
nms_keep_idx (Tensor): The index of kept bboxes after NMS.
"""
boxes_idx, yolo_boxes_scores = self.decode(head_out, anchors)
if len(boxes_idx) == 0:
boxes_idx = self.fake_boxes_idx
yolo_boxes_out = self.fake_yolo_boxes_out
yolo_scores_out = self.fake_yolo_scores_out
else:
yolo_boxes = paddle.gather_nd(yolo_boxes_scores, boxes_idx)
# TODO: only support bs=1 now
yolo_boxes_out = paddle.reshape(
yolo_boxes[:, :4], shape=[1, len(boxes_idx), 4])
yolo_scores_out = paddle.reshape(
yolo_boxes[:, 4:5], shape=[1, 1, len(boxes_idx)])
boxes_idx = boxes_idx[:, 1:]
if self.return_idx:
bbox_pred, bbox_num, nms_keep_idx = self.nms(
yolo_boxes_out, yolo_scores_out, self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
nms_keep_idx = self.fake_nms_keep_idx
return boxes_idx, bbox_pred, bbox_num, nms_keep_idx
else:
bbox_pred, bbox_num, _ = self.nms(yolo_boxes_out, yolo_scores_out,
self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
return _, bbox_pred, bbox_num, _
@register
class CenterNetPostProcess(TTFBox):
"""
Postprocess the model outputs to get final prediction:
1. Do NMS for heatmap to get top `max_per_img` bboxes.
2. Decode bboxes using center offset and box size.
3. Rescale decoded bboxes reference to the origin image shape.
Args:
        max_per_img(int): the maximum number of predicted objects in an image,
500 by default.
down_ratio(int): the down ratio from images to heatmap, 4 by default.
regress_ltrb (bool): whether to regress left/top/right/bottom or
width/height for a box, true by default.
for_mot (bool): whether return other features used in tracking model.
"""
__shared__ = ['down_ratio', 'for_mot']
def __init__(self,
max_per_img=500,
down_ratio=4,
regress_ltrb=True,
for_mot=False):
super(TTFBox, self).__init__()
self.max_per_img = max_per_img
self.down_ratio = down_ratio
self.regress_ltrb = regress_ltrb
self.for_mot = for_mot
def __call__(self, hm, wh, reg, im_shape, scale_factor):
heat = self._simple_nms(hm)
scores, inds, topk_clses, ys, xs = self._topk(heat)
scores = paddle.tensor.unsqueeze(scores, [1])
clses = paddle.tensor.unsqueeze(topk_clses, [1])
reg_t = paddle.transpose(reg, [0, 2, 3, 1])
# Like TTFBox, batch size is 1.
# TODO: support batch size > 1
reg = paddle.reshape(reg_t, [-1, paddle.shape(reg_t)[-1]])
reg = paddle.gather(reg, inds)
xs = paddle.cast(xs, 'float32')
ys = paddle.cast(ys, 'float32')
xs = xs + reg[:, 0:1]
ys = ys + reg[:, 1:2]
wh_t = paddle.transpose(wh, [0, 2, 3, 1])
wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
wh = paddle.gather(wh, inds)
if self.regress_ltrb:
x1 = xs - wh[:, 0:1]
y1 = ys - wh[:, 1:2]
x2 = xs + wh[:, 2:3]
y2 = ys + wh[:, 3:4]
else:
x1 = xs - wh[:, 0:1] / 2
y1 = ys - wh[:, 1:2] / 2
x2 = xs + wh[:, 0:1] / 2
y2 = ys + wh[:, 1:2] / 2
n, c, feat_h, feat_w = hm.shape[:]
padw = (feat_w * self.down_ratio - im_shape[0, 1]) / 2
padh = (feat_h * self.down_ratio - im_shape[0, 0]) / 2
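        # e.g. a 152x152 heatmap with down_ratio = 4 covers a 608x608 padded input; if the
        # resized image inside it is 608 high and 500 wide, padw = (152 * 4 - 500) / 2 = 54
        # and padh = 0, and those pad stripes are removed from the boxes below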
x1 = x1 * self.down_ratio
y1 = y1 * self.down_ratio
x2 = x2 * self.down_ratio
y2 = y2 * self.down_ratio
x1 = x1 - padw
y1 = y1 - padh
x2 = x2 - padw
y2 = y2 - padh
bboxes = paddle.concat([x1, y1, x2, y2], axis=1)
scale_y = scale_factor[:, 0:1]
scale_x = scale_factor[:, 1:2]
scale_expand = paddle.concat(
[scale_x, scale_y, scale_x, scale_y], axis=1)
boxes_shape = paddle.shape(bboxes)
boxes_shape.stop_gradient = True
scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
bboxes = paddle.divide(bboxes, scale_expand)
if self.for_mot:
results = paddle.concat([bboxes, scores, clses], axis=1)
return results, inds, topk_clses
else:
results = paddle.concat([clses, scores, bboxes], axis=1)
return results, paddle.shape(results)[0:1], topk_clses
@register
class DETRBBoxPostProcess(object):
__shared__ = ['num_classes', 'use_focal_loss']
__inject__ = []
def __init__(self,
num_classes=80,
num_top_queries=100,
use_focal_loss=False):
super(DETRBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.num_top_queries = num_top_queries
self.use_focal_loss = use_focal_loss
def __call__(self, head_out, im_shape, scale_factor):
"""
Decode the bbox.
Args:
head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
labels, scores and bboxes. The size of bboxes are corresponding
to the input image, the bboxes may be used in other branch.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [bs], and is N.
"""
bboxes, logits, masks = head_out
bbox_pred = bbox_cxcywh_to_xyxy(bboxes)
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
img_h, img_w = origin_shape.unbind(1)
origin_shape = paddle.stack(
[img_w, img_h, img_w, img_h], axis=-1).unsqueeze(0)
bbox_pred *= origin_shape
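        # e.g. a normalised (cx, cy, w, h) = (0.5, 0.5, 0.2, 0.4) prediction on a 640x480
        # image becomes (x1, y1, x2, y2) = (256, 144, 384, 336) after the conversion and
        # rescaling above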
scores = F.sigmoid(logits) if self.use_focal_loss else F.softmax(
logits)[:, :, :-1]
if not self.use_focal_loss:
scores, labels = scores.max(-1), scores.argmax(-1)
if scores.shape[1] > self.num_top_queries:
scores, index = paddle.topk(
scores, self.num_top_queries, axis=-1)
labels = paddle.stack(
[paddle.gather(l, i) for l, i in zip(labels, index)])
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
else:
scores, index = paddle.topk(
scores.reshape([logits.shape[0], -1]),
self.num_top_queries,
axis=-1)
labels = index % logits.shape[2]
index = index // logits.shape[2]
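            # the flat index over num_queries * num_classes decomposes into (query, class),
            # e.g. with 80 classes a flat index of 243 means query 3, class 3 (243 = 3 * 80 + 3)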
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
bbox_pred = paddle.concat(
[
labels.unsqueeze(-1).astype('float32'), scores.unsqueeze(-1),
bbox_pred
],
axis=-1)
bbox_num = paddle.to_tensor(
bbox_pred.shape[1], dtype='int32').tile([bbox_pred.shape[0]])
bbox_pred = bbox_pred.reshape([-1, 6])
return bbox_pred, bbox_num
@register
class SparsePostProcess(object):
__shared__ = ['num_classes']
def __init__(self, num_proposals, num_classes=80):
super(SparsePostProcess, self).__init__()
self.num_classes = num_classes
self.num_proposals = num_proposals
def __call__(self, box_cls, box_pred, scale_factor_wh, img_whwh):
"""
Arguments:
box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).
The tensor predicts the classification probability for each proposal.
box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).
The tensor predicts 4-vector (x,y,w,h) box
regression values for every proposal
            scale_factor_wh (Tensor): tensors of shape [batch_size, 2], the scale factor of each image
img_whwh (Tensor): tensors of shape [batch_size, 4]
Returns:
bbox_pred (Tensor): tensors of shape [num_boxes, 6] Each row has 6 values:
[label, confidence, xmin, ymin, xmax, ymax]
bbox_num (Tensor): tensors of shape [batch_size] the number of RoIs in each image.
"""
assert len(box_cls) == len(scale_factor_wh) == len(img_whwh)
img_wh = img_whwh[:, :2]
scores = F.sigmoid(box_cls)
labels = paddle.arange(0, self.num_classes). \
unsqueeze(0).tile([self.num_proposals, 1]).flatten(start_axis=0, stop_axis=1)
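        # labels repeats the class ids once per proposal, i.e. [0, ..., K-1, 0, ..., K-1, ...]
        # with length num_proposals * num_classes, aligned with the flattened scores below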
classes_all = []
scores_all = []
boxes_all = []
for i, (scores_per_image,
box_pred_per_image) in enumerate(zip(scores, box_pred)):
scores_per_image, topk_indices = scores_per_image.flatten(
0, 1).topk(
self.num_proposals, sorted=False)
labels_per_image = paddle.gather(labels, topk_indices, axis=0)
box_pred_per_image = box_pred_per_image.reshape([-1, 1, 4]).tile(
[1, self.num_classes, 1]).reshape([-1, 4])
box_pred_per_image = paddle.gather(
box_pred_per_image, topk_indices, axis=0)
classes_all.append(labels_per_image)
scores_all.append(scores_per_image)
boxes_all.append(box_pred_per_image)
bbox_num = paddle.zeros([len(scale_factor_wh)], dtype="int32")
boxes_final = []
for i in range(len(scale_factor_wh)):
classes = classes_all[i]
boxes = boxes_all[i]
scores = scores_all[i]
boxes[:, 0::2] = paddle.clip(
boxes[:, 0::2], min=0, max=img_wh[i][0]) / scale_factor_wh[i][0]
boxes[:, 1::2] = paddle.clip(
boxes[:, 1::2], min=0, max=img_wh[i][1]) / scale_factor_wh[i][1]
boxes_w, boxes_h = (boxes[:, 2] - boxes[:, 0]).numpy(), (
boxes[:, 3] - boxes[:, 1]).numpy()
keep = (boxes_w > 1.) & (boxes_h > 1.)
if (keep.sum() == 0):
bboxes = paddle.zeros([1, 6]).astype("float32")
else:
boxes = paddle.to_tensor(boxes.numpy()[keep]).astype("float32")
classes = paddle.to_tensor(classes.numpy()[keep]).astype(
"float32").unsqueeze(-1)
scores = paddle.to_tensor(scores.numpy()[keep]).astype(
"float32").unsqueeze(-1)
bboxes = paddle.concat([classes, scores, boxes], axis=-1)
boxes_final.append(bboxes)
bbox_num[i] = bboxes.shape[0]
bbox_pred = paddle.concat(boxes_final)
return bbox_pred, bbox_num
def nms(dets, thresh):
"""Apply classic DPM-style greedy NMS."""
if dets.shape[0] == 0:
return dets[[], :]
scores = dets[:, 0]
x1 = dets[:, 1]
y1 = dets[:, 2]
x2 = dets[:, 3]
y2 = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
ndets = dets.shape[0]
    suppressed = np.zeros((ndets), dtype=np.int64)  # np.int was removed in NumPy 1.24+
# nominal indices
# _i, _j
# sorted indices
# i, j
# temp variables for box i's (the box currently under consideration)
# ix1, iy1, ix2, iy2, iarea
# variables for computing overlap with box j (lower scoring box)
# xx1, yy1, xx2, yy2
# w, h
# inter, ovr
for _i in range(ndets):
i = order[_i]
if suppressed[i] == 1:
continue
ix1 = x1[i]
iy1 = y1[i]
ix2 = x2[i]
iy2 = y2[i]
iarea = areas[i]
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
xx1 = max(ix1, x1[j])
yy1 = max(iy1, y1[j])
xx2 = min(ix2, x2[j])
yy2 = min(iy2, y2[j])
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (iarea + areas[j] - inter)
if ovr >= thresh:
suppressed[j] = 1
keep = np.where(suppressed == 0)[0]
dets = dets[keep, :]
return dets
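# A small illustrative use of the greedy NMS above: each row of `dets` is
# [score, x1, y1, x2, y2], and any box whose IoU with a higher-scoring kept box
# reaches `thresh` is suppressed. With
#     dets = np.array([[0.9, 10, 10, 50, 50],
#                      [0.8, 12, 12, 48, 48],    # IoU ~0.81 with the first box
#                      [0.7, 100, 100, 140, 140]], dtype=np.float32)
#     kept = nms(dets, 0.5)
# `kept` holds rows 0 and 2; row 1 is dropped because 0.81 >= 0.5.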
|
def __call__(self, mask_out, bboxes, bbox_num, origin_shape):
"""
Decode the mask_out and paste the mask to the origin image.
Args:
mask_out (Tensor): mask_head output with shape [N, 28, 28].
            bboxes (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
origin_shape (Tensor): The origin shape of the input image, the tensor
shape is [N, 2], and each row is [h, w].
Returns:
pred_result (Tensor): The final prediction mask results with shape
[N, h, w] in binary mask style.
"""
num_mask = mask_out.shape[0]
origin_shape = paddle.cast(origin_shape, 'int32')
# TODO: support bs > 1 and mask output dtype is bool
pred_result = paddle.zeros(
[num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')
if bbox_num == 1 and bboxes[0][0] == -1:
return pred_result
# TODO: optimize chunk paste
pred_result = []
for i in range(bboxes.shape[0]):
im_h, im_w = origin_shape[i][0], origin_shape[i][1]
pred_mask = self.paste_mask(mask_out[i], bboxes[i:i + 1, 2:], im_h,
im_w)
pred_mask = pred_mask >= self.binary_thresh
pred_mask = paddle.cast(pred_mask, 'int32')
pred_result.append(pred_mask)
pred_result = paddle.concat(pred_result)
return pred_result
| 175 | 209 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ppdet.modeling.bbox_utils import nonempty_bbox, rbox2poly
from ppdet.modeling.layers import TTFBox
from .transformers import bbox_cxcywh_to_xyxy
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
__all__ = [
'BBoxPostProcess', 'MaskPostProcess', 'FCOSPostProcess',
'S2ANetBBoxPostProcess', 'JDEBBoxPostProcess', 'CenterNetPostProcess',
'DETRBBoxPostProcess', 'SparsePostProcess'
]
@register
class BBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=80, decode=None, nms=None):
super(BBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.fake_bboxes = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, head_out, rois, im_shape, scale_factor):
"""
Decode the bbox and do NMS if needed.
Args:
head_out (tuple): bbox_pred and cls_prob of bbox_head output.
rois (tuple): roi and rois_num of rpn_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
labels, scores and bboxes. The size of bboxes are corresponding
to the input image, the bboxes may be used in other branch.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
"""
if self.nms is not None:
bboxes, score = self.decode(head_out, rois, im_shape, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score, self.num_classes)
else:
bbox_pred, bbox_num = self.decode(head_out, rois, im_shape,
scale_factor)
return bbox_pred, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Notes:
Currently only support bs = 1.
Args:
bboxes (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
pred_result (Tensor): The final prediction results with shape [N, 6]
including labels, scores and bboxes.
"""
if bboxes.shape[0] == 0:
bboxes = self.fake_bboxes
bbox_num = self.fake_bbox_num
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([scale_x, scale_y, scale_x, scale_y])
expand_scale = paddle.expand(scale, [bbox_num[i], 4])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
self.origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 6], label, score, bbox
pred_label = bboxes[:, 0:1]
pred_score = bboxes[:, 1:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
scaled_bbox = pred_bbox / scale_factor_list
origin_h = self.origin_shape_list[:, 0]
origin_w = self.origin_shape_list[:, 1]
zeros = paddle.zeros_like(origin_h)
# clip bbox to [0, original_size]
x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros)
y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros)
x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros)
y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1)
# filter empty bbox
keep_mask = nonempty_bbox(pred_bbox, return_mask=True)
keep_mask = paddle.unsqueeze(keep_mask, [1])
pred_label = paddle.where(keep_mask, pred_label,
paddle.ones_like(pred_label) * -1)
pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1)
return pred_result
def get_origin_shape(self, ):
return self.origin_shape_list
@register
class MaskPostProcess(object):
"""
refer to:
https://github.com/facebookresearch/detectron2/layers/mask_ops.py
Get Mask output according to the output from model
"""
def __init__(self, binary_thresh=0.5):
super(MaskPostProcess, self).__init__()
self.binary_thresh = binary_thresh
def paste_mask(self, masks, boxes, im_h, im_w):
"""
Paste the mask prediction to the original image.
"""
x0, y0, x1, y1 = paddle.split(boxes, 4, axis=1)
masks = paddle.unsqueeze(masks, [0, 1])
img_y = paddle.arange(0, im_h, dtype='float32') + 0.5
img_x = paddle.arange(0, im_w, dtype='float32') + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
img_x = paddle.unsqueeze(img_x, [1])
img_y = paddle.unsqueeze(img_y, [2])
N = boxes.shape[0]
gx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]])
gy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]])
grid = paddle.stack([gx, gy], axis=3)
img_masks = F.grid_sample(masks, grid, align_corners=False)
return img_masks[:, 0]
def __call__(self, mask_out, bboxes, bbox_num, origin_shape):
"""
Decode the mask_out and paste the mask to the origin image.
Args:
mask_out (Tensor): mask_head output with shape [N, 28, 28].
            bboxes (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
origin_shape (Tensor): The origin shape of the input image, the tensor
shape is [N, 2], and each row is [h, w].
Returns:
pred_result (Tensor): The final prediction mask results with shape
[N, h, w] in binary mask style.
"""
num_mask = mask_out.shape[0]
origin_shape = paddle.cast(origin_shape, 'int32')
# TODO: support bs > 1 and mask output dtype is bool
pred_result = paddle.zeros(
[num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')
if bbox_num == 1 and bboxes[0][0] == -1:
return pred_result
# TODO: optimize chunk paste
pred_result = []
for i in range(bboxes.shape[0]):
im_h, im_w = origin_shape[i][0], origin_shape[i][1]
pred_mask = self.paste_mask(mask_out[i], bboxes[i:i + 1, 2:], im_h,
im_w)
pred_mask = pred_mask >= self.binary_thresh
pred_mask = paddle.cast(pred_mask, 'int32')
pred_result.append(pred_mask)
pred_result = paddle.concat(pred_result)
return pred_result
@register
class FCOSPostProcess(object):
__inject__ = ['decode', 'nms']
def __init__(self, decode=None, nms=None):
super(FCOSPostProcess, self).__init__()
self.decode = decode
self.nms = nms
def __call__(self, fcos_head_outs, scale_factor):
"""
Decode the bbox and do NMS in FCOS.
"""
locations, cls_logits, bboxes_reg, centerness = fcos_head_outs
bboxes, score = self.decode(locations, cls_logits, bboxes_reg,
centerness, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score)
return bbox_pred, bbox_num
@register
class S2ANetBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['nms']
def __init__(self, num_classes=15, nms_pre=2000, min_bbox_size=0, nms=None):
super(S2ANetBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.nms_pre = paddle.to_tensor(nms_pre)
self.min_bbox_size = min_bbox_size
self.nms = nms
self.origin_shape_list = []
self.fake_pred_cls_score_bbox = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, pred_scores, pred_bboxes):
"""
pred_scores : [N, M] score
pred_bboxes : [N, 5] xc, yc, w, h, a
im_shape : [N, 2] im_shape
scale_factor : [N, 2] scale_factor
"""
pred_ploys0 = rbox2poly(pred_bboxes)
pred_ploys = paddle.unsqueeze(pred_ploys0, axis=0)
# pred_scores [NA, 16] --> [16, NA]
pred_scores0 = paddle.transpose(pred_scores, [1, 0])
pred_scores = paddle.unsqueeze(pred_scores0, axis=0)
pred_cls_score_bbox, bbox_num, _ = self.nms(pred_ploys, pred_scores,
self.num_classes)
# Prevent empty bbox_pred from decode or NMS.
# Bboxes and score before NMS may be empty due to the score threshold.
if pred_cls_score_bbox.shape[0] <= 0 or pred_cls_score_bbox.shape[
1] <= 1:
pred_cls_score_bbox = self.fake_pred_cls_score_bbox
bbox_num = self.fake_bbox_num
pred_cls_score_bbox = paddle.reshape(pred_cls_score_bbox, [-1, 10])
return pred_cls_score_bbox, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Args:
bboxes(Tensor): bboxes [N, 10]
bbox_num(Tensor): bbox_num
im_shape(Tensor): [1 2]
scale_factor(Tensor): [1 2]
Returns:
bbox_pred(Tensor): The output is the prediction with shape [N, 8]
including labels, scores and bboxes. The size of
bboxes are corresponding to the original image.
"""
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([
scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x,
scale_y
])
expand_scale = paddle.expand(scale, [bbox_num[i], 8])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 10], label, score, bbox
pred_label_score = bboxes[:, 0:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
pred_bbox = pred_bbox.reshape([-1, 8])
scaled_bbox = pred_bbox / scale_factor_list
origin_h = origin_shape_list[:, 0]
origin_w = origin_shape_list[:, 1]
bboxes = scaled_bbox
zeros = paddle.zeros_like(origin_h)
x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros)
y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros)
x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros)
y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros)
x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros)
y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros)
x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros)
y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1)
pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1)
return pred_result
@register
class JDEBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=1, decode=None, nms=None, return_idx=True):
super(JDEBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.return_idx = return_idx
self.fake_bbox_pred = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
self.fake_nms_keep_idx = paddle.to_tensor(
np.array(
[[0]], dtype='int32'))
self.fake_yolo_boxes_out = paddle.to_tensor(
np.array(
[[[0.0, 0.0, 0.0, 0.0]]], dtype='float32'))
self.fake_yolo_scores_out = paddle.to_tensor(
np.array(
[[[0.0]]], dtype='float32'))
self.fake_boxes_idx = paddle.to_tensor(np.array([[0]], dtype='int64'))
def forward(self, head_out, anchors):
"""
Decode the bbox and do NMS for JDE model.
Args:
head_out (list): Bbox_pred and cls_prob of bbox_head output.
anchors (list): Anchors of JDE model.
Returns:
boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'.
bbox_pred (Tensor): The output is the prediction with shape [N, 6]
including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction of each batch with shape [N].
nms_keep_idx (Tensor): The index of kept bboxes after NMS.
"""
boxes_idx, yolo_boxes_scores = self.decode(head_out, anchors)
if len(boxes_idx) == 0:
boxes_idx = self.fake_boxes_idx
yolo_boxes_out = self.fake_yolo_boxes_out
yolo_scores_out = self.fake_yolo_scores_out
else:
yolo_boxes = paddle.gather_nd(yolo_boxes_scores, boxes_idx)
# TODO: only support bs=1 now
yolo_boxes_out = paddle.reshape(
yolo_boxes[:, :4], shape=[1, len(boxes_idx), 4])
yolo_scores_out = paddle.reshape(
yolo_boxes[:, 4:5], shape=[1, 1, len(boxes_idx)])
boxes_idx = boxes_idx[:, 1:]
if self.return_idx:
bbox_pred, bbox_num, nms_keep_idx = self.nms(
yolo_boxes_out, yolo_scores_out, self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
nms_keep_idx = self.fake_nms_keep_idx
return boxes_idx, bbox_pred, bbox_num, nms_keep_idx
else:
bbox_pred, bbox_num, _ = self.nms(yolo_boxes_out, yolo_scores_out,
self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
return _, bbox_pred, bbox_num, _
@register
class CenterNetPostProcess(TTFBox):
"""
Postprocess the model outputs to get final prediction:
1. Do NMS for heatmap to get top `max_per_img` bboxes.
2. Decode bboxes using center offset and box size.
3. Rescale decoded bboxes reference to the origin image shape.
Args:
        max_per_img(int): the maximum number of predicted objects in an image,
500 by default.
down_ratio(int): the down ratio from images to heatmap, 4 by default.
regress_ltrb (bool): whether to regress left/top/right/bottom or
width/height for a box, true by default.
for_mot (bool): whether return other features used in tracking model.
"""
__shared__ = ['down_ratio', 'for_mot']
def __init__(self,
max_per_img=500,
down_ratio=4,
regress_ltrb=True,
for_mot=False):
super(TTFBox, self).__init__()
self.max_per_img = max_per_img
self.down_ratio = down_ratio
self.regress_ltrb = regress_ltrb
self.for_mot = for_mot
def __call__(self, hm, wh, reg, im_shape, scale_factor):
heat = self._simple_nms(hm)
scores, inds, topk_clses, ys, xs = self._topk(heat)
scores = paddle.tensor.unsqueeze(scores, [1])
clses = paddle.tensor.unsqueeze(topk_clses, [1])
reg_t = paddle.transpose(reg, [0, 2, 3, 1])
# Like TTFBox, batch size is 1.
# TODO: support batch size > 1
reg = paddle.reshape(reg_t, [-1, paddle.shape(reg_t)[-1]])
reg = paddle.gather(reg, inds)
xs = paddle.cast(xs, 'float32')
ys = paddle.cast(ys, 'float32')
xs = xs + reg[:, 0:1]
ys = ys + reg[:, 1:2]
wh_t = paddle.transpose(wh, [0, 2, 3, 1])
wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
wh = paddle.gather(wh, inds)
if self.regress_ltrb:
x1 = xs - wh[:, 0:1]
y1 = ys - wh[:, 1:2]
x2 = xs + wh[:, 2:3]
y2 = ys + wh[:, 3:4]
else:
x1 = xs - wh[:, 0:1] / 2
y1 = ys - wh[:, 1:2] / 2
x2 = xs + wh[:, 0:1] / 2
y2 = ys + wh[:, 1:2] / 2
n, c, feat_h, feat_w = hm.shape[:]
padw = (feat_w * self.down_ratio - im_shape[0, 1]) / 2
padh = (feat_h * self.down_ratio - im_shape[0, 0]) / 2
x1 = x1 * self.down_ratio
y1 = y1 * self.down_ratio
x2 = x2 * self.down_ratio
y2 = y2 * self.down_ratio
x1 = x1 - padw
y1 = y1 - padh
x2 = x2 - padw
y2 = y2 - padh
bboxes = paddle.concat([x1, y1, x2, y2], axis=1)
scale_y = scale_factor[:, 0:1]
scale_x = scale_factor[:, 1:2]
scale_expand = paddle.concat(
[scale_x, scale_y, scale_x, scale_y], axis=1)
boxes_shape = paddle.shape(bboxes)
boxes_shape.stop_gradient = True
scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
bboxes = paddle.divide(bboxes, scale_expand)
if self.for_mot:
results = paddle.concat([bboxes, scores, clses], axis=1)
return results, inds, topk_clses
else:
results = paddle.concat([clses, scores, bboxes], axis=1)
return results, paddle.shape(results)[0:1], topk_clses
@register
class DETRBBoxPostProcess(object):
__shared__ = ['num_classes', 'use_focal_loss']
__inject__ = []
def __init__(self,
num_classes=80,
num_top_queries=100,
use_focal_loss=False):
super(DETRBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.num_top_queries = num_top_queries
self.use_focal_loss = use_focal_loss
def __call__(self, head_out, im_shape, scale_factor):
"""
Decode the bbox.
Args:
head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
labels, scores and bboxes. The size of bboxes are corresponding
to the input image, the bboxes may be used in other branch.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [bs], and is N.
"""
bboxes, logits, masks = head_out
bbox_pred = bbox_cxcywh_to_xyxy(bboxes)
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
img_h, img_w = origin_shape.unbind(1)
origin_shape = paddle.stack(
[img_w, img_h, img_w, img_h], axis=-1).unsqueeze(0)
bbox_pred *= origin_shape
scores = F.sigmoid(logits) if self.use_focal_loss else F.softmax(
logits)[:, :, :-1]
if not self.use_focal_loss:
scores, labels = scores.max(-1), scores.argmax(-1)
if scores.shape[1] > self.num_top_queries:
scores, index = paddle.topk(
scores, self.num_top_queries, axis=-1)
labels = paddle.stack(
[paddle.gather(l, i) for l, i in zip(labels, index)])
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
else:
scores, index = paddle.topk(
scores.reshape([logits.shape[0], -1]),
self.num_top_queries,
axis=-1)
labels = index % logits.shape[2]
index = index // logits.shape[2]
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
bbox_pred = paddle.concat(
[
labels.unsqueeze(-1).astype('float32'), scores.unsqueeze(-1),
bbox_pred
],
axis=-1)
bbox_num = paddle.to_tensor(
bbox_pred.shape[1], dtype='int32').tile([bbox_pred.shape[0]])
bbox_pred = bbox_pred.reshape([-1, 6])
return bbox_pred, bbox_num
@register
class SparsePostProcess(object):
__shared__ = ['num_classes']
def __init__(self, num_proposals, num_classes=80):
super(SparsePostProcess, self).__init__()
self.num_classes = num_classes
self.num_proposals = num_proposals
def __call__(self, box_cls, box_pred, scale_factor_wh, img_whwh):
"""
Arguments:
box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).
The tensor predicts the classification probability for each proposal.
box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).
The tensor predicts 4-vector (x,y,w,h) box
regression values for every proposal
            scale_factor_wh (Tensor): tensors of shape [batch_size, 2], the scale factor of each image
img_whwh (Tensor): tensors of shape [batch_size, 4]
Returns:
bbox_pred (Tensor): tensors of shape [num_boxes, 6] Each row has 6 values:
[label, confidence, xmin, ymin, xmax, ymax]
bbox_num (Tensor): tensors of shape [batch_size] the number of RoIs in each image.
"""
assert len(box_cls) == len(scale_factor_wh) == len(img_whwh)
img_wh = img_whwh[:, :2]
scores = F.sigmoid(box_cls)
labels = paddle.arange(0, self.num_classes). \
unsqueeze(0).tile([self.num_proposals, 1]).flatten(start_axis=0, stop_axis=1)
classes_all = []
scores_all = []
boxes_all = []
for i, (scores_per_image,
box_pred_per_image) in enumerate(zip(scores, box_pred)):
scores_per_image, topk_indices = scores_per_image.flatten(
0, 1).topk(
self.num_proposals, sorted=False)
labels_per_image = paddle.gather(labels, topk_indices, axis=0)
box_pred_per_image = box_pred_per_image.reshape([-1, 1, 4]).tile(
[1, self.num_classes, 1]).reshape([-1, 4])
box_pred_per_image = paddle.gather(
box_pred_per_image, topk_indices, axis=0)
classes_all.append(labels_per_image)
scores_all.append(scores_per_image)
boxes_all.append(box_pred_per_image)
bbox_num = paddle.zeros([len(scale_factor_wh)], dtype="int32")
boxes_final = []
for i in range(len(scale_factor_wh)):
classes = classes_all[i]
boxes = boxes_all[i]
scores = scores_all[i]
boxes[:, 0::2] = paddle.clip(
boxes[:, 0::2], min=0, max=img_wh[i][0]) / scale_factor_wh[i][0]
boxes[:, 1::2] = paddle.clip(
boxes[:, 1::2], min=0, max=img_wh[i][1]) / scale_factor_wh[i][1]
boxes_w, boxes_h = (boxes[:, 2] - boxes[:, 0]).numpy(), (
boxes[:, 3] - boxes[:, 1]).numpy()
keep = (boxes_w > 1.) & (boxes_h > 1.)
if (keep.sum() == 0):
bboxes = paddle.zeros([1, 6]).astype("float32")
else:
boxes = paddle.to_tensor(boxes.numpy()[keep]).astype("float32")
classes = paddle.to_tensor(classes.numpy()[keep]).astype(
"float32").unsqueeze(-1)
scores = paddle.to_tensor(scores.numpy()[keep]).astype(
"float32").unsqueeze(-1)
bboxes = paddle.concat([classes, scores, boxes], axis=-1)
boxes_final.append(bboxes)
bbox_num[i] = bboxes.shape[0]
bbox_pred = paddle.concat(boxes_final)
return bbox_pred, bbox_num
def nms(dets, thresh):
"""Apply classic DPM-style greedy NMS."""
if dets.shape[0] == 0:
return dets[[], :]
scores = dets[:, 0]
x1 = dets[:, 1]
y1 = dets[:, 2]
x2 = dets[:, 3]
y2 = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
ndets = dets.shape[0]
    suppressed = np.zeros((ndets), dtype=np.int64)  # np.int was removed in NumPy 1.24+
# nominal indices
# _i, _j
# sorted indices
# i, j
# temp variables for box i's (the box currently under consideration)
# ix1, iy1, ix2, iy2, iarea
# variables for computing overlap with box j (lower scoring box)
# xx1, yy1, xx2, yy2
# w, h
# inter, ovr
for _i in range(ndets):
i = order[_i]
if suppressed[i] == 1:
continue
ix1 = x1[i]
iy1 = y1[i]
ix2 = x2[i]
iy2 = y2[i]
iarea = areas[i]
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
xx1 = max(ix1, x1[j])
yy1 = max(iy1, y1[j])
xx2 = min(ix2, x2[j])
yy2 = min(iy2, y2[j])
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (iarea + areas[j] - inter)
if ovr >= thresh:
suppressed[j] = 1
keep = np.where(suppressed == 0)[0]
dets = dets[keep, :]
return dets
|
forward
|
Decode the bbox and do NMS for JDE model.
Args:
head_out (list): Bbox_pred and cls_prob of bbox_head output.
anchors (list): Anchors of JDE model.
Returns:
boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'.
bbox_pred (Tensor): The output is the prediction with shape [N, 6]
including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction of each batch with shape [N].
nms_keep_idx (Tensor): The index of kept bboxes after NMS.
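    When decode or NMS returns nothing, the method falls back to the fake_* placeholder
    tensors built in __init__, so callers always receive non-empty outputs; a label of -1
    in bbox_pred marks such a placeholder row. A small consumption sketch (the variable
    names are illustrative, not part of the API):
        boxes_idx, bbox_pred, bbox_num, nms_keep_idx = jde_post_process(head_out, anchors)
        labels, scores, boxes = bbox_pred[:, 0], bbox_pred[:, 1], bbox_pred[:, 2:]
        valid = labels != -1   # boolean mask excluding the placeholder row for empty detections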
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ppdet.modeling.bbox_utils import nonempty_bbox, rbox2poly
from ppdet.modeling.layers import TTFBox
from .transformers import bbox_cxcywh_to_xyxy
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
__all__ = [
'BBoxPostProcess', 'MaskPostProcess', 'FCOSPostProcess',
'S2ANetBBoxPostProcess', 'JDEBBoxPostProcess', 'CenterNetPostProcess',
'DETRBBoxPostProcess', 'SparsePostProcess'
]
@register
class BBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=80, decode=None, nms=None):
super(BBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.fake_bboxes = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, head_out, rois, im_shape, scale_factor):
"""
Decode the bbox and do NMS if needed.
Args:
head_out (tuple): bbox_pred and cls_prob of bbox_head output.
rois (tuple): roi and rois_num of rpn_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
labels, scores and bboxes. The size of bboxes are corresponding
to the input image, the bboxes may be used in other branch.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
"""
if self.nms is not None:
bboxes, score = self.decode(head_out, rois, im_shape, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score, self.num_classes)
else:
bbox_pred, bbox_num = self.decode(head_out, rois, im_shape,
scale_factor)
return bbox_pred, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Notes:
Currently only support bs = 1.
Args:
bboxes (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
pred_result (Tensor): The final prediction results with shape [N, 6]
including labels, scores and bboxes.
"""
if bboxes.shape[0] == 0:
bboxes = self.fake_bboxes
bbox_num = self.fake_bbox_num
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([scale_x, scale_y, scale_x, scale_y])
expand_scale = paddle.expand(scale, [bbox_num[i], 4])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
self.origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 6], label, score, bbox
pred_label = bboxes[:, 0:1]
pred_score = bboxes[:, 1:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
scaled_bbox = pred_bbox / scale_factor_list
origin_h = self.origin_shape_list[:, 0]
origin_w = self.origin_shape_list[:, 1]
zeros = paddle.zeros_like(origin_h)
# clip bbox to [0, original_size]
x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros)
y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros)
x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros)
y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1)
# filter empty bbox
keep_mask = nonempty_bbox(pred_bbox, return_mask=True)
keep_mask = paddle.unsqueeze(keep_mask, [1])
pred_label = paddle.where(keep_mask, pred_label,
paddle.ones_like(pred_label) * -1)
pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1)
return pred_result
def get_origin_shape(self, ):
return self.origin_shape_list
@register
class MaskPostProcess(object):
"""
refer to:
https://github.com/facebookresearch/detectron2/layers/mask_ops.py
Get Mask output according to the output from model
"""
def __init__(self, binary_thresh=0.5):
super(MaskPostProcess, self).__init__()
self.binary_thresh = binary_thresh
def paste_mask(self, masks, boxes, im_h, im_w):
"""
Paste the mask prediction to the original image.
"""
x0, y0, x1, y1 = paddle.split(boxes, 4, axis=1)
masks = paddle.unsqueeze(masks, [0, 1])
img_y = paddle.arange(0, im_h, dtype='float32') + 0.5
img_x = paddle.arange(0, im_w, dtype='float32') + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
img_x = paddle.unsqueeze(img_x, [1])
img_y = paddle.unsqueeze(img_y, [2])
N = boxes.shape[0]
gx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]])
gy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]])
grid = paddle.stack([gx, gy], axis=3)
img_masks = F.grid_sample(masks, grid, align_corners=False)
return img_masks[:, 0]
def __call__(self, mask_out, bboxes, bbox_num, origin_shape):
"""
Decode the mask_out and paste the mask to the origin image.
Args:
mask_out (Tensor): mask_head output with shape [N, 28, 28].
            bboxes (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
origin_shape (Tensor): The origin shape of the input image, the tensor
shape is [N, 2], and each row is [h, w].
Returns:
pred_result (Tensor): The final prediction mask results with shape
[N, h, w] in binary mask style.
"""
num_mask = mask_out.shape[0]
origin_shape = paddle.cast(origin_shape, 'int32')
# TODO: support bs > 1 and mask output dtype is bool
pred_result = paddle.zeros(
[num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')
if bbox_num == 1 and bboxes[0][0] == -1:
return pred_result
# TODO: optimize chunk paste
pred_result = []
for i in range(bboxes.shape[0]):
im_h, im_w = origin_shape[i][0], origin_shape[i][1]
pred_mask = self.paste_mask(mask_out[i], bboxes[i:i + 1, 2:], im_h,
im_w)
pred_mask = pred_mask >= self.binary_thresh
pred_mask = paddle.cast(pred_mask, 'int32')
pred_result.append(pred_mask)
pred_result = paddle.concat(pred_result)
return pred_result
@register
class FCOSPostProcess(object):
__inject__ = ['decode', 'nms']
def __init__(self, decode=None, nms=None):
super(FCOSPostProcess, self).__init__()
self.decode = decode
self.nms = nms
def __call__(self, fcos_head_outs, scale_factor):
"""
Decode the bbox and do NMS in FCOS.
"""
locations, cls_logits, bboxes_reg, centerness = fcos_head_outs
bboxes, score = self.decode(locations, cls_logits, bboxes_reg,
centerness, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score)
return bbox_pred, bbox_num
@register
class S2ANetBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['nms']
def __init__(self, num_classes=15, nms_pre=2000, min_bbox_size=0, nms=None):
super(S2ANetBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.nms_pre = paddle.to_tensor(nms_pre)
self.min_bbox_size = min_bbox_size
self.nms = nms
self.origin_shape_list = []
self.fake_pred_cls_score_bbox = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, pred_scores, pred_bboxes):
"""
pred_scores : [N, M] score
pred_bboxes : [N, 5] xc, yc, w, h, a
im_shape : [N, 2] im_shape
scale_factor : [N, 2] scale_factor
"""
pred_ploys0 = rbox2poly(pred_bboxes)
pred_ploys = paddle.unsqueeze(pred_ploys0, axis=0)
# pred_scores [NA, 16] --> [16, NA]
pred_scores0 = paddle.transpose(pred_scores, [1, 0])
pred_scores = paddle.unsqueeze(pred_scores0, axis=0)
pred_cls_score_bbox, bbox_num, _ = self.nms(pred_ploys, pred_scores,
self.num_classes)
# Prevent empty bbox_pred from decode or NMS.
# Bboxes and score before NMS may be empty due to the score threshold.
if pred_cls_score_bbox.shape[0] <= 0 or pred_cls_score_bbox.shape[
1] <= 1:
pred_cls_score_bbox = self.fake_pred_cls_score_bbox
bbox_num = self.fake_bbox_num
pred_cls_score_bbox = paddle.reshape(pred_cls_score_bbox, [-1, 10])
return pred_cls_score_bbox, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Args:
bboxes(Tensor): bboxes [N, 10]
bbox_num(Tensor): bbox_num
im_shape(Tensor): [1 2]
scale_factor(Tensor): [1 2]
Returns:
bbox_pred(Tensor): The output is the prediction with shape [N, 8]
including labels, scores and bboxes. The size of
bboxes are corresponding to the original image.
"""
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([
scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x,
scale_y
])
expand_scale = paddle.expand(scale, [bbox_num[i], 8])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 10], label, score, bbox
pred_label_score = bboxes[:, 0:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
pred_bbox = pred_bbox.reshape([-1, 8])
scaled_bbox = pred_bbox / scale_factor_list
origin_h = origin_shape_list[:, 0]
origin_w = origin_shape_list[:, 1]
bboxes = scaled_bbox
zeros = paddle.zeros_like(origin_h)
x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros)
y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros)
x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros)
y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros)
x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros)
y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros)
x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros)
y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1)
pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1)
return pred_result
@register
class JDEBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=1, decode=None, nms=None, return_idx=True):
super(JDEBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.return_idx = return_idx
self.fake_bbox_pred = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
self.fake_nms_keep_idx = paddle.to_tensor(
np.array(
[[0]], dtype='int32'))
self.fake_yolo_boxes_out = paddle.to_tensor(
np.array(
[[[0.0, 0.0, 0.0, 0.0]]], dtype='float32'))
self.fake_yolo_scores_out = paddle.to_tensor(
np.array(
[[[0.0]]], dtype='float32'))
self.fake_boxes_idx = paddle.to_tensor(np.array([[0]], dtype='int64'))
# MASKED: forward function (lines 363-407)
@register
class CenterNetPostProcess(TTFBox):
"""
Postprocess the model outputs to get final prediction:
1. Do NMS for heatmap to get top `max_per_img` bboxes.
2. Decode bboxes using center offset and box size.
3. Rescale decoded bboxes reference to the origin image shape.
Args:
        max_per_img(int): the maximum number of predicted objects in an image,
500 by default.
down_ratio(int): the down ratio from images to heatmap, 4 by default.
regress_ltrb (bool): whether to regress left/top/right/bottom or
width/height for a box, true by default.
for_mot (bool): whether return other features used in tracking model.
"""
__shared__ = ['down_ratio', 'for_mot']
def __init__(self,
max_per_img=500,
down_ratio=4,
regress_ltrb=True,
for_mot=False):
super(TTFBox, self).__init__()
self.max_per_img = max_per_img
self.down_ratio = down_ratio
self.regress_ltrb = regress_ltrb
self.for_mot = for_mot
def __call__(self, hm, wh, reg, im_shape, scale_factor):
heat = self._simple_nms(hm)
scores, inds, topk_clses, ys, xs = self._topk(heat)
scores = paddle.tensor.unsqueeze(scores, [1])
clses = paddle.tensor.unsqueeze(topk_clses, [1])
reg_t = paddle.transpose(reg, [0, 2, 3, 1])
# Like TTFBox, batch size is 1.
# TODO: support batch size > 1
reg = paddle.reshape(reg_t, [-1, paddle.shape(reg_t)[-1]])
reg = paddle.gather(reg, inds)
xs = paddle.cast(xs, 'float32')
ys = paddle.cast(ys, 'float32')
xs = xs + reg[:, 0:1]
ys = ys + reg[:, 1:2]
wh_t = paddle.transpose(wh, [0, 2, 3, 1])
wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
wh = paddle.gather(wh, inds)
if self.regress_ltrb:
x1 = xs - wh[:, 0:1]
y1 = ys - wh[:, 1:2]
x2 = xs + wh[:, 2:3]
y2 = ys + wh[:, 3:4]
else:
x1 = xs - wh[:, 0:1] / 2
y1 = ys - wh[:, 1:2] / 2
x2 = xs + wh[:, 0:1] / 2
y2 = ys + wh[:, 1:2] / 2
n, c, feat_h, feat_w = hm.shape[:]
padw = (feat_w * self.down_ratio - im_shape[0, 1]) / 2
padh = (feat_h * self.down_ratio - im_shape[0, 0]) / 2
x1 = x1 * self.down_ratio
y1 = y1 * self.down_ratio
x2 = x2 * self.down_ratio
y2 = y2 * self.down_ratio
x1 = x1 - padw
y1 = y1 - padh
x2 = x2 - padw
y2 = y2 - padh
bboxes = paddle.concat([x1, y1, x2, y2], axis=1)
scale_y = scale_factor[:, 0:1]
scale_x = scale_factor[:, 1:2]
scale_expand = paddle.concat(
[scale_x, scale_y, scale_x, scale_y], axis=1)
boxes_shape = paddle.shape(bboxes)
boxes_shape.stop_gradient = True
scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
bboxes = paddle.divide(bboxes, scale_expand)
if self.for_mot:
results = paddle.concat([bboxes, scores, clses], axis=1)
return results, inds, topk_clses
else:
results = paddle.concat([clses, scores, bboxes], axis=1)
return results, paddle.shape(results)[0:1], topk_clses
@register
class DETRBBoxPostProcess(object):
__shared__ = ['num_classes', 'use_focal_loss']
__inject__ = []
def __init__(self,
num_classes=80,
num_top_queries=100,
use_focal_loss=False):
super(DETRBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.num_top_queries = num_top_queries
self.use_focal_loss = use_focal_loss
def __call__(self, head_out, im_shape, scale_factor):
"""
Decode the bbox.
Args:
head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
                labels, scores and bboxes. The bbox sizes correspond to the
                input image, and the bboxes may be used in another branch.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [bs], and is N.
"""
bboxes, logits, masks = head_out
bbox_pred = bbox_cxcywh_to_xyxy(bboxes)
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
img_h, img_w = origin_shape.unbind(1)
origin_shape = paddle.stack(
[img_w, img_h, img_w, img_h], axis=-1).unsqueeze(0)
bbox_pred *= origin_shape
scores = F.sigmoid(logits) if self.use_focal_loss else F.softmax(
logits)[:, :, :-1]
if not self.use_focal_loss:
scores, labels = scores.max(-1), scores.argmax(-1)
if scores.shape[1] > self.num_top_queries:
scores, index = paddle.topk(
scores, self.num_top_queries, axis=-1)
labels = paddle.stack(
[paddle.gather(l, i) for l, i in zip(labels, index)])
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
else:
scores, index = paddle.topk(
scores.reshape([logits.shape[0], -1]),
self.num_top_queries,
axis=-1)
labels = index % logits.shape[2]
index = index // logits.shape[2]
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
bbox_pred = paddle.concat(
[
labels.unsqueeze(-1).astype('float32'), scores.unsqueeze(-1),
bbox_pred
],
axis=-1)
bbox_num = paddle.to_tensor(
bbox_pred.shape[1], dtype='int32').tile([bbox_pred.shape[0]])
bbox_pred = bbox_pred.reshape([-1, 6])
return bbox_pred, bbox_num
@register
class SparsePostProcess(object):
__shared__ = ['num_classes']
def __init__(self, num_proposals, num_classes=80):
super(SparsePostProcess, self).__init__()
self.num_classes = num_classes
self.num_proposals = num_proposals
def __call__(self, box_cls, box_pred, scale_factor_wh, img_whwh):
"""
Arguments:
box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).
The tensor predicts the classification probability for each proposal.
box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).
The tensor predicts 4-vector (x,y,w,h) box
regression values for every proposal
            scale_factor_wh (Tensor): tensor of shape [batch_size, 2], the (w, h) scale factor of each image.
            img_whwh (Tensor): tensor of shape [batch_size, 4].
        Returns:
            bbox_pred (Tensor): tensor of shape [num_boxes, 6]. Each row has 6 values:
                [label, confidence, xmin, ymin, xmax, ymax].
            bbox_num (Tensor): tensor of shape [batch_size], the number of RoIs in each image.
"""
assert len(box_cls) == len(scale_factor_wh) == len(img_whwh)
img_wh = img_whwh[:, :2]
scores = F.sigmoid(box_cls)
labels = paddle.arange(0, self.num_classes). \
unsqueeze(0).tile([self.num_proposals, 1]).flatten(start_axis=0, stop_axis=1)
classes_all = []
scores_all = []
boxes_all = []
for i, (scores_per_image,
box_pred_per_image) in enumerate(zip(scores, box_pred)):
scores_per_image, topk_indices = scores_per_image.flatten(
0, 1).topk(
self.num_proposals, sorted=False)
labels_per_image = paddle.gather(labels, topk_indices, axis=0)
box_pred_per_image = box_pred_per_image.reshape([-1, 1, 4]).tile(
[1, self.num_classes, 1]).reshape([-1, 4])
box_pred_per_image = paddle.gather(
box_pred_per_image, topk_indices, axis=0)
classes_all.append(labels_per_image)
scores_all.append(scores_per_image)
boxes_all.append(box_pred_per_image)
bbox_num = paddle.zeros([len(scale_factor_wh)], dtype="int32")
boxes_final = []
for i in range(len(scale_factor_wh)):
classes = classes_all[i]
boxes = boxes_all[i]
scores = scores_all[i]
boxes[:, 0::2] = paddle.clip(
boxes[:, 0::2], min=0, max=img_wh[i][0]) / scale_factor_wh[i][0]
boxes[:, 1::2] = paddle.clip(
boxes[:, 1::2], min=0, max=img_wh[i][1]) / scale_factor_wh[i][1]
boxes_w, boxes_h = (boxes[:, 2] - boxes[:, 0]).numpy(), (
boxes[:, 3] - boxes[:, 1]).numpy()
keep = (boxes_w > 1.) & (boxes_h > 1.)
if (keep.sum() == 0):
bboxes = paddle.zeros([1, 6]).astype("float32")
else:
boxes = paddle.to_tensor(boxes.numpy()[keep]).astype("float32")
classes = paddle.to_tensor(classes.numpy()[keep]).astype(
"float32").unsqueeze(-1)
scores = paddle.to_tensor(scores.numpy()[keep]).astype(
"float32").unsqueeze(-1)
bboxes = paddle.concat([classes, scores, boxes], axis=-1)
boxes_final.append(bboxes)
bbox_num[i] = bboxes.shape[0]
bbox_pred = paddle.concat(boxes_final)
return bbox_pred, bbox_num
def nms(dets, thresh):
"""Apply classic DPM-style greedy NMS."""
if dets.shape[0] == 0:
return dets[[], :]
scores = dets[:, 0]
x1 = dets[:, 1]
y1 = dets[:, 2]
x2 = dets[:, 3]
y2 = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
ndets = dets.shape[0]
    suppressed = np.zeros((ndets), dtype=np.int32)
# nominal indices
# _i, _j
# sorted indices
# i, j
# temp variables for box i's (the box currently under consideration)
# ix1, iy1, ix2, iy2, iarea
# variables for computing overlap with box j (lower scoring box)
# xx1, yy1, xx2, yy2
# w, h
# inter, ovr
for _i in range(ndets):
i = order[_i]
if suppressed[i] == 1:
continue
ix1 = x1[i]
iy1 = y1[i]
ix2 = x2[i]
iy2 = y2[i]
iarea = areas[i]
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
xx1 = max(ix1, x1[j])
yy1 = max(iy1, y1[j])
xx2 = min(ix2, x2[j])
yy2 = min(iy2, y2[j])
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (iarea + areas[j] - inter)
if ovr >= thresh:
suppressed[j] = 1
keep = np.where(suppressed == 0)[0]
dets = dets[keep, :]
return dets
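# ---------------------------------------------------------------------------
# Illustration only (not part of the original post_process module): a minimal
# usage sketch of the greedy nms() helper above. It assumes the row layout this
# function expects, i.e. [score, x1, y1, x2, y2]; all numbers are made up.
if __name__ == '__main__':
    demo_dets = np.array(
        [[0.9, 10, 10, 50, 50],        # high-scoring box
         [0.8, 12, 12, 48, 48],        # heavily overlaps the first box
         [0.7, 100, 100, 150, 150]],   # far away, should survive
        dtype=np.float32)
    kept = nms(demo_dets, thresh=0.5)
    print(kept)  # expect only the first and third rows to remain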
|
def forward(self, head_out, anchors):
"""
Decode the bbox and do NMS for JDE model.
Args:
head_out (list): Bbox_pred and cls_prob of bbox_head output.
anchors (list): Anchors of JDE model.
Returns:
boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'.
bbox_pred (Tensor): The output is the prediction with shape [N, 6]
including labels, scores and bboxes.
            bbox_num (Tensor): The number of predictions in each batch, with shape [N].
nms_keep_idx (Tensor): The index of kept bboxes after NMS.
"""
boxes_idx, yolo_boxes_scores = self.decode(head_out, anchors)
if len(boxes_idx) == 0:
boxes_idx = self.fake_boxes_idx
yolo_boxes_out = self.fake_yolo_boxes_out
yolo_scores_out = self.fake_yolo_scores_out
else:
yolo_boxes = paddle.gather_nd(yolo_boxes_scores, boxes_idx)
# TODO: only support bs=1 now
yolo_boxes_out = paddle.reshape(
yolo_boxes[:, :4], shape=[1, len(boxes_idx), 4])
yolo_scores_out = paddle.reshape(
yolo_boxes[:, 4:5], shape=[1, 1, len(boxes_idx)])
boxes_idx = boxes_idx[:, 1:]
if self.return_idx:
bbox_pred, bbox_num, nms_keep_idx = self.nms(
yolo_boxes_out, yolo_scores_out, self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
nms_keep_idx = self.fake_nms_keep_idx
return boxes_idx, bbox_pred, bbox_num, nms_keep_idx
else:
bbox_pred, bbox_num, _ = self.nms(yolo_boxes_out, yolo_scores_out,
self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
return _, bbox_pred, bbox_num, _
| 363 | 407 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ppdet.modeling.bbox_utils import nonempty_bbox, rbox2poly
from ppdet.modeling.layers import TTFBox
from .transformers import bbox_cxcywh_to_xyxy
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
__all__ = [
'BBoxPostProcess', 'MaskPostProcess', 'FCOSPostProcess',
'S2ANetBBoxPostProcess', 'JDEBBoxPostProcess', 'CenterNetPostProcess',
'DETRBBoxPostProcess', 'SparsePostProcess'
]
@register
class BBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=80, decode=None, nms=None):
super(BBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.fake_bboxes = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, head_out, rois, im_shape, scale_factor):
"""
Decode the bbox and do NMS if needed.
Args:
head_out (tuple): bbox_pred and cls_prob of bbox_head output.
rois (tuple): roi and rois_num of rpn_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
                labels, scores and bboxes. The bbox sizes correspond to the
                input image, and the bboxes may be used in another branch.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
"""
if self.nms is not None:
bboxes, score = self.decode(head_out, rois, im_shape, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score, self.num_classes)
else:
bbox_pred, bbox_num = self.decode(head_out, rois, im_shape,
scale_factor)
return bbox_pred, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Notes:
Currently only support bs = 1.
Args:
bboxes (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
pred_result (Tensor): The final prediction results with shape [N, 6]
including labels, scores and bboxes.
"""
if bboxes.shape[0] == 0:
bboxes = self.fake_bboxes
bbox_num = self.fake_bbox_num
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([scale_x, scale_y, scale_x, scale_y])
expand_scale = paddle.expand(scale, [bbox_num[i], 4])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
self.origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 6], label, score, bbox
pred_label = bboxes[:, 0:1]
pred_score = bboxes[:, 1:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
scaled_bbox = pred_bbox / scale_factor_list
origin_h = self.origin_shape_list[:, 0]
origin_w = self.origin_shape_list[:, 1]
zeros = paddle.zeros_like(origin_h)
# clip bbox to [0, original_size]
x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros)
y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros)
x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros)
y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1)
# filter empty bbox
keep_mask = nonempty_bbox(pred_bbox, return_mask=True)
keep_mask = paddle.unsqueeze(keep_mask, [1])
pred_label = paddle.where(keep_mask, pred_label,
paddle.ones_like(pred_label) * -1)
pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1)
return pred_result
def get_origin_shape(self, ):
return self.origin_shape_list
@register
class MaskPostProcess(object):
"""
refer to:
https://github.com/facebookresearch/detectron2/layers/mask_ops.py
    Get the mask output according to the model output.
"""
def __init__(self, binary_thresh=0.5):
super(MaskPostProcess, self).__init__()
self.binary_thresh = binary_thresh
def paste_mask(self, masks, boxes, im_h, im_w):
"""
Paste the mask prediction to the original image.
"""
x0, y0, x1, y1 = paddle.split(boxes, 4, axis=1)
masks = paddle.unsqueeze(masks, [0, 1])
img_y = paddle.arange(0, im_h, dtype='float32') + 0.5
img_x = paddle.arange(0, im_w, dtype='float32') + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
img_x = paddle.unsqueeze(img_x, [1])
img_y = paddle.unsqueeze(img_y, [2])
N = boxes.shape[0]
gx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]])
gy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]])
grid = paddle.stack([gx, gy], axis=3)
img_masks = F.grid_sample(masks, grid, align_corners=False)
return img_masks[:, 0]
def __call__(self, mask_out, bboxes, bbox_num, origin_shape):
"""
Decode the mask_out and paste the mask to the origin image.
Args:
mask_out (Tensor): mask_head output with shape [N, 28, 28].
bbox_pred (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
origin_shape (Tensor): The origin shape of the input image, the tensor
shape is [N, 2], and each row is [h, w].
Returns:
pred_result (Tensor): The final prediction mask results with shape
[N, h, w] in binary mask style.
"""
num_mask = mask_out.shape[0]
origin_shape = paddle.cast(origin_shape, 'int32')
# TODO: support bs > 1 and mask output dtype is bool
pred_result = paddle.zeros(
[num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')
if bbox_num == 1 and bboxes[0][0] == -1:
return pred_result
# TODO: optimize chunk paste
pred_result = []
for i in range(bboxes.shape[0]):
im_h, im_w = origin_shape[i][0], origin_shape[i][1]
pred_mask = self.paste_mask(mask_out[i], bboxes[i:i + 1, 2:], im_h,
im_w)
pred_mask = pred_mask >= self.binary_thresh
pred_mask = paddle.cast(pred_mask, 'int32')
pred_result.append(pred_mask)
pred_result = paddle.concat(pred_result)
return pred_result
@register
class FCOSPostProcess(object):
__inject__ = ['decode', 'nms']
def __init__(self, decode=None, nms=None):
super(FCOSPostProcess, self).__init__()
self.decode = decode
self.nms = nms
def __call__(self, fcos_head_outs, scale_factor):
"""
Decode the bbox and do NMS in FCOS.
"""
locations, cls_logits, bboxes_reg, centerness = fcos_head_outs
bboxes, score = self.decode(locations, cls_logits, bboxes_reg,
centerness, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score)
return bbox_pred, bbox_num
@register
class S2ANetBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['nms']
def __init__(self, num_classes=15, nms_pre=2000, min_bbox_size=0, nms=None):
super(S2ANetBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.nms_pre = paddle.to_tensor(nms_pre)
self.min_bbox_size = min_bbox_size
self.nms = nms
self.origin_shape_list = []
self.fake_pred_cls_score_bbox = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, pred_scores, pred_bboxes):
"""
pred_scores : [N, M] score
pred_bboxes : [N, 5] xc, yc, w, h, a
im_shape : [N, 2] im_shape
scale_factor : [N, 2] scale_factor
"""
pred_ploys0 = rbox2poly(pred_bboxes)
pred_ploys = paddle.unsqueeze(pred_ploys0, axis=0)
# pred_scores [NA, 16] --> [16, NA]
pred_scores0 = paddle.transpose(pred_scores, [1, 0])
pred_scores = paddle.unsqueeze(pred_scores0, axis=0)
pred_cls_score_bbox, bbox_num, _ = self.nms(pred_ploys, pred_scores,
self.num_classes)
# Prevent empty bbox_pred from decode or NMS.
# Bboxes and score before NMS may be empty due to the score threshold.
if pred_cls_score_bbox.shape[0] <= 0 or pred_cls_score_bbox.shape[
1] <= 1:
pred_cls_score_bbox = self.fake_pred_cls_score_bbox
bbox_num = self.fake_bbox_num
pred_cls_score_bbox = paddle.reshape(pred_cls_score_bbox, [-1, 10])
return pred_cls_score_bbox, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Args:
bboxes(Tensor): bboxes [N, 10]
bbox_num(Tensor): bbox_num
im_shape(Tensor): [1 2]
scale_factor(Tensor): [1 2]
Returns:
bbox_pred(Tensor): The output is the prediction with shape [N, 8]
                               including labels, scores and bboxes. The bbox sizes
                               correspond to the original image.
"""
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([
scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x,
scale_y
])
expand_scale = paddle.expand(scale, [bbox_num[i], 8])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 10], label, score, bbox
pred_label_score = bboxes[:, 0:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
pred_bbox = pred_bbox.reshape([-1, 8])
scaled_bbox = pred_bbox / scale_factor_list
origin_h = origin_shape_list[:, 0]
origin_w = origin_shape_list[:, 1]
bboxes = scaled_bbox
zeros = paddle.zeros_like(origin_h)
x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros)
y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros)
x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros)
y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros)
x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros)
y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros)
x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros)
y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1)
pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1)
return pred_result
@register
class JDEBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=1, decode=None, nms=None, return_idx=True):
super(JDEBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.return_idx = return_idx
self.fake_bbox_pred = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
self.fake_nms_keep_idx = paddle.to_tensor(
np.array(
[[0]], dtype='int32'))
self.fake_yolo_boxes_out = paddle.to_tensor(
np.array(
[[[0.0, 0.0, 0.0, 0.0]]], dtype='float32'))
self.fake_yolo_scores_out = paddle.to_tensor(
np.array(
[[[0.0]]], dtype='float32'))
self.fake_boxes_idx = paddle.to_tensor(np.array([[0]], dtype='int64'))
def forward(self, head_out, anchors):
"""
Decode the bbox and do NMS for JDE model.
Args:
head_out (list): Bbox_pred and cls_prob of bbox_head output.
anchors (list): Anchors of JDE model.
Returns:
boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'.
bbox_pred (Tensor): The output is the prediction with shape [N, 6]
including labels, scores and bboxes.
            bbox_num (Tensor): The number of predictions in each batch, with shape [N].
nms_keep_idx (Tensor): The index of kept bboxes after NMS.
"""
boxes_idx, yolo_boxes_scores = self.decode(head_out, anchors)
if len(boxes_idx) == 0:
boxes_idx = self.fake_boxes_idx
yolo_boxes_out = self.fake_yolo_boxes_out
yolo_scores_out = self.fake_yolo_scores_out
else:
yolo_boxes = paddle.gather_nd(yolo_boxes_scores, boxes_idx)
# TODO: only support bs=1 now
yolo_boxes_out = paddle.reshape(
yolo_boxes[:, :4], shape=[1, len(boxes_idx), 4])
yolo_scores_out = paddle.reshape(
yolo_boxes[:, 4:5], shape=[1, 1, len(boxes_idx)])
boxes_idx = boxes_idx[:, 1:]
if self.return_idx:
bbox_pred, bbox_num, nms_keep_idx = self.nms(
yolo_boxes_out, yolo_scores_out, self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
nms_keep_idx = self.fake_nms_keep_idx
return boxes_idx, bbox_pred, bbox_num, nms_keep_idx
else:
bbox_pred, bbox_num, _ = self.nms(yolo_boxes_out, yolo_scores_out,
self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
return _, bbox_pred, bbox_num, _
@register
class CenterNetPostProcess(TTFBox):
"""
Postprocess the model outputs to get final prediction:
1. Do NMS for heatmap to get top `max_per_img` bboxes.
2. Decode bboxes using center offset and box size.
        3. Rescale the decoded bboxes back to the original image shape.
    Args:
        max_per_img (int): the maximum number of predicted objects in an image,
            500 by default.
        down_ratio (int): the down ratio from images to heatmap, 4 by default.
        regress_ltrb (bool): whether to regress left/top/right/bottom or
            width/height for a box, True by default.
        for_mot (bool): whether to return other features used in the tracking model.
"""
__shared__ = ['down_ratio', 'for_mot']
def __init__(self,
max_per_img=500,
down_ratio=4,
regress_ltrb=True,
for_mot=False):
super(TTFBox, self).__init__()
self.max_per_img = max_per_img
self.down_ratio = down_ratio
self.regress_ltrb = regress_ltrb
self.for_mot = for_mot
def __call__(self, hm, wh, reg, im_shape, scale_factor):
heat = self._simple_nms(hm)
scores, inds, topk_clses, ys, xs = self._topk(heat)
scores = paddle.tensor.unsqueeze(scores, [1])
clses = paddle.tensor.unsqueeze(topk_clses, [1])
reg_t = paddle.transpose(reg, [0, 2, 3, 1])
# Like TTFBox, batch size is 1.
# TODO: support batch size > 1
reg = paddle.reshape(reg_t, [-1, paddle.shape(reg_t)[-1]])
reg = paddle.gather(reg, inds)
xs = paddle.cast(xs, 'float32')
ys = paddle.cast(ys, 'float32')
xs = xs + reg[:, 0:1]
ys = ys + reg[:, 1:2]
wh_t = paddle.transpose(wh, [0, 2, 3, 1])
wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
wh = paddle.gather(wh, inds)
if self.regress_ltrb:
x1 = xs - wh[:, 0:1]
y1 = ys - wh[:, 1:2]
x2 = xs + wh[:, 2:3]
y2 = ys + wh[:, 3:4]
else:
x1 = xs - wh[:, 0:1] / 2
y1 = ys - wh[:, 1:2] / 2
x2 = xs + wh[:, 0:1] / 2
y2 = ys + wh[:, 1:2] / 2
n, c, feat_h, feat_w = hm.shape[:]
padw = (feat_w * self.down_ratio - im_shape[0, 1]) / 2
padh = (feat_h * self.down_ratio - im_shape[0, 0]) / 2
x1 = x1 * self.down_ratio
y1 = y1 * self.down_ratio
x2 = x2 * self.down_ratio
y2 = y2 * self.down_ratio
x1 = x1 - padw
y1 = y1 - padh
x2 = x2 - padw
y2 = y2 - padh
bboxes = paddle.concat([x1, y1, x2, y2], axis=1)
scale_y = scale_factor[:, 0:1]
scale_x = scale_factor[:, 1:2]
scale_expand = paddle.concat(
[scale_x, scale_y, scale_x, scale_y], axis=1)
boxes_shape = paddle.shape(bboxes)
boxes_shape.stop_gradient = True
scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
bboxes = paddle.divide(bboxes, scale_expand)
if self.for_mot:
results = paddle.concat([bboxes, scores, clses], axis=1)
return results, inds, topk_clses
else:
results = paddle.concat([clses, scores, bboxes], axis=1)
return results, paddle.shape(results)[0:1], topk_clses
@register
class DETRBBoxPostProcess(object):
__shared__ = ['num_classes', 'use_focal_loss']
__inject__ = []
def __init__(self,
num_classes=80,
num_top_queries=100,
use_focal_loss=False):
super(DETRBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.num_top_queries = num_top_queries
self.use_focal_loss = use_focal_loss
def __call__(self, head_out, im_shape, scale_factor):
"""
Decode the bbox.
Args:
head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
                labels, scores and bboxes. The bbox sizes correspond to the
                input image, and the bboxes may be used in another branch.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [bs], and is N.
"""
bboxes, logits, masks = head_out
bbox_pred = bbox_cxcywh_to_xyxy(bboxes)
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
img_h, img_w = origin_shape.unbind(1)
origin_shape = paddle.stack(
[img_w, img_h, img_w, img_h], axis=-1).unsqueeze(0)
bbox_pred *= origin_shape
scores = F.sigmoid(logits) if self.use_focal_loss else F.softmax(
logits)[:, :, :-1]
if not self.use_focal_loss:
scores, labels = scores.max(-1), scores.argmax(-1)
if scores.shape[1] > self.num_top_queries:
scores, index = paddle.topk(
scores, self.num_top_queries, axis=-1)
labels = paddle.stack(
[paddle.gather(l, i) for l, i in zip(labels, index)])
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
else:
scores, index = paddle.topk(
scores.reshape([logits.shape[0], -1]),
self.num_top_queries,
axis=-1)
labels = index % logits.shape[2]
index = index // logits.shape[2]
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
bbox_pred = paddle.concat(
[
labels.unsqueeze(-1).astype('float32'), scores.unsqueeze(-1),
bbox_pred
],
axis=-1)
bbox_num = paddle.to_tensor(
bbox_pred.shape[1], dtype='int32').tile([bbox_pred.shape[0]])
bbox_pred = bbox_pred.reshape([-1, 6])
return bbox_pred, bbox_num
@register
class SparsePostProcess(object):
__shared__ = ['num_classes']
def __init__(self, num_proposals, num_classes=80):
super(SparsePostProcess, self).__init__()
self.num_classes = num_classes
self.num_proposals = num_proposals
def __call__(self, box_cls, box_pred, scale_factor_wh, img_whwh):
"""
Arguments:
box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).
The tensor predicts the classification probability for each proposal.
box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).
The tensor predicts 4-vector (x,y,w,h) box
regression values for every proposal
            scale_factor_wh (Tensor): tensor of shape [batch_size, 2], the (w, h) scale factor of each image.
            img_whwh (Tensor): tensor of shape [batch_size, 4].
        Returns:
            bbox_pred (Tensor): tensor of shape [num_boxes, 6]. Each row has 6 values:
                [label, confidence, xmin, ymin, xmax, ymax].
            bbox_num (Tensor): tensor of shape [batch_size], the number of RoIs in each image.
"""
assert len(box_cls) == len(scale_factor_wh) == len(img_whwh)
img_wh = img_whwh[:, :2]
scores = F.sigmoid(box_cls)
labels = paddle.arange(0, self.num_classes). \
unsqueeze(0).tile([self.num_proposals, 1]).flatten(start_axis=0, stop_axis=1)
classes_all = []
scores_all = []
boxes_all = []
for i, (scores_per_image,
box_pred_per_image) in enumerate(zip(scores, box_pred)):
scores_per_image, topk_indices = scores_per_image.flatten(
0, 1).topk(
self.num_proposals, sorted=False)
labels_per_image = paddle.gather(labels, topk_indices, axis=0)
box_pred_per_image = box_pred_per_image.reshape([-1, 1, 4]).tile(
[1, self.num_classes, 1]).reshape([-1, 4])
box_pred_per_image = paddle.gather(
box_pred_per_image, topk_indices, axis=0)
classes_all.append(labels_per_image)
scores_all.append(scores_per_image)
boxes_all.append(box_pred_per_image)
bbox_num = paddle.zeros([len(scale_factor_wh)], dtype="int32")
boxes_final = []
for i in range(len(scale_factor_wh)):
classes = classes_all[i]
boxes = boxes_all[i]
scores = scores_all[i]
boxes[:, 0::2] = paddle.clip(
boxes[:, 0::2], min=0, max=img_wh[i][0]) / scale_factor_wh[i][0]
boxes[:, 1::2] = paddle.clip(
boxes[:, 1::2], min=0, max=img_wh[i][1]) / scale_factor_wh[i][1]
boxes_w, boxes_h = (boxes[:, 2] - boxes[:, 0]).numpy(), (
boxes[:, 3] - boxes[:, 1]).numpy()
keep = (boxes_w > 1.) & (boxes_h > 1.)
if (keep.sum() == 0):
bboxes = paddle.zeros([1, 6]).astype("float32")
else:
boxes = paddle.to_tensor(boxes.numpy()[keep]).astype("float32")
classes = paddle.to_tensor(classes.numpy()[keep]).astype(
"float32").unsqueeze(-1)
scores = paddle.to_tensor(scores.numpy()[keep]).astype(
"float32").unsqueeze(-1)
bboxes = paddle.concat([classes, scores, boxes], axis=-1)
boxes_final.append(bboxes)
bbox_num[i] = bboxes.shape[0]
bbox_pred = paddle.concat(boxes_final)
return bbox_pred, bbox_num
def nms(dets, thresh):
"""Apply classic DPM-style greedy NMS."""
if dets.shape[0] == 0:
return dets[[], :]
scores = dets[:, 0]
x1 = dets[:, 1]
y1 = dets[:, 2]
x2 = dets[:, 3]
y2 = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
ndets = dets.shape[0]
    suppressed = np.zeros((ndets), dtype=np.int32)
# nominal indices
# _i, _j
# sorted indices
# i, j
# temp variables for box i's (the box currently under consideration)
# ix1, iy1, ix2, iy2, iarea
# variables for computing overlap with box j (lower scoring box)
# xx1, yy1, xx2, yy2
# w, h
# inter, ovr
for _i in range(ndets):
i = order[_i]
if suppressed[i] == 1:
continue
ix1 = x1[i]
iy1 = y1[i]
ix2 = x2[i]
iy2 = y2[i]
iarea = areas[i]
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
xx1 = max(ix1, x1[j])
yy1 = max(iy1, y1[j])
xx2 = min(ix2, x2[j])
yy2 = min(iy2, y2[j])
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (iarea + areas[j] - inter)
if ovr >= thresh:
suppressed[j] = 1
keep = np.where(suppressed == 0)[0]
dets = dets[keep, :]
return dets
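# ---------------------------------------------------------------------------
# Illustration only (not part of the original module): the coordinate mapping
# used in CenterNetPostProcess.__call__ above, worked through for one box
# corner in plain Python. All concrete numbers below are made-up example values.
if __name__ == '__main__':
    down_ratio = 4
    feat_h, feat_w = 152, 272            # heatmap size
    inp_h, inp_w = 608, 1056             # network input size (im_shape)
    scale = 0.55                         # resize factor from the original image
    padw = (feat_w * down_ratio - inp_w) / 2   # letterbox padding along x
    padh = (feat_h * down_ratio - inp_h) / 2   # letterbox padding along y
    x1_feat, y1_feat = 30.0, 40.0        # a box corner predicted on the heatmap
    # heatmap coords -> padded network input -> original image
    x1_orig = (x1_feat * down_ratio - padw) / scale
    y1_orig = (y1_feat * down_ratio - padh) / scale
    print(x1_orig, y1_orig)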
|
__call__
|
Decode the bbox.
Args:
head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
                labels, scores and bboxes. The bbox sizes correspond to the
                input image, and the bboxes may be used in another branch.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [bs], and is N.
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ppdet.modeling.bbox_utils import nonempty_bbox, rbox2poly
from ppdet.modeling.layers import TTFBox
from .transformers import bbox_cxcywh_to_xyxy
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
__all__ = [
'BBoxPostProcess', 'MaskPostProcess', 'FCOSPostProcess',
'S2ANetBBoxPostProcess', 'JDEBBoxPostProcess', 'CenterNetPostProcess',
'DETRBBoxPostProcess', 'SparsePostProcess'
]
@register
class BBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=80, decode=None, nms=None):
super(BBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.fake_bboxes = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, head_out, rois, im_shape, scale_factor):
"""
Decode the bbox and do NMS if needed.
Args:
head_out (tuple): bbox_pred and cls_prob of bbox_head output.
rois (tuple): roi and rois_num of rpn_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
                labels, scores and bboxes. The bbox sizes correspond to the
                input image, and the bboxes may be used in another branch.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
"""
if self.nms is not None:
bboxes, score = self.decode(head_out, rois, im_shape, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score, self.num_classes)
else:
bbox_pred, bbox_num = self.decode(head_out, rois, im_shape,
scale_factor)
return bbox_pred, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Notes:
Currently only support bs = 1.
Args:
bboxes (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
pred_result (Tensor): The final prediction results with shape [N, 6]
including labels, scores and bboxes.
"""
if bboxes.shape[0] == 0:
bboxes = self.fake_bboxes
bbox_num = self.fake_bbox_num
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([scale_x, scale_y, scale_x, scale_y])
expand_scale = paddle.expand(scale, [bbox_num[i], 4])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
self.origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 6], label, score, bbox
pred_label = bboxes[:, 0:1]
pred_score = bboxes[:, 1:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
scaled_bbox = pred_bbox / scale_factor_list
origin_h = self.origin_shape_list[:, 0]
origin_w = self.origin_shape_list[:, 1]
zeros = paddle.zeros_like(origin_h)
# clip bbox to [0, original_size]
x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros)
y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros)
x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros)
y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1)
# filter empty bbox
keep_mask = nonempty_bbox(pred_bbox, return_mask=True)
keep_mask = paddle.unsqueeze(keep_mask, [1])
pred_label = paddle.where(keep_mask, pred_label,
paddle.ones_like(pred_label) * -1)
pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1)
return pred_result
def get_origin_shape(self, ):
return self.origin_shape_list
@register
class MaskPostProcess(object):
"""
refer to:
https://github.com/facebookresearch/detectron2/layers/mask_ops.py
    Get the mask output according to the model output.
"""
def __init__(self, binary_thresh=0.5):
super(MaskPostProcess, self).__init__()
self.binary_thresh = binary_thresh
def paste_mask(self, masks, boxes, im_h, im_w):
"""
Paste the mask prediction to the original image.
"""
x0, y0, x1, y1 = paddle.split(boxes, 4, axis=1)
masks = paddle.unsqueeze(masks, [0, 1])
img_y = paddle.arange(0, im_h, dtype='float32') + 0.5
img_x = paddle.arange(0, im_w, dtype='float32') + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
img_x = paddle.unsqueeze(img_x, [1])
img_y = paddle.unsqueeze(img_y, [2])
N = boxes.shape[0]
gx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]])
gy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]])
grid = paddle.stack([gx, gy], axis=3)
img_masks = F.grid_sample(masks, grid, align_corners=False)
return img_masks[:, 0]
def __call__(self, mask_out, bboxes, bbox_num, origin_shape):
"""
Decode the mask_out and paste the mask to the origin image.
Args:
mask_out (Tensor): mask_head output with shape [N, 28, 28].
bbox_pred (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
origin_shape (Tensor): The origin shape of the input image, the tensor
shape is [N, 2], and each row is [h, w].
Returns:
pred_result (Tensor): The final prediction mask results with shape
[N, h, w] in binary mask style.
"""
num_mask = mask_out.shape[0]
origin_shape = paddle.cast(origin_shape, 'int32')
# TODO: support bs > 1 and mask output dtype is bool
pred_result = paddle.zeros(
[num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')
if bbox_num == 1 and bboxes[0][0] == -1:
return pred_result
# TODO: optimize chunk paste
pred_result = []
for i in range(bboxes.shape[0]):
im_h, im_w = origin_shape[i][0], origin_shape[i][1]
pred_mask = self.paste_mask(mask_out[i], bboxes[i:i + 1, 2:], im_h,
im_w)
pred_mask = pred_mask >= self.binary_thresh
pred_mask = paddle.cast(pred_mask, 'int32')
pred_result.append(pred_mask)
pred_result = paddle.concat(pred_result)
return pred_result
@register
class FCOSPostProcess(object):
__inject__ = ['decode', 'nms']
def __init__(self, decode=None, nms=None):
super(FCOSPostProcess, self).__init__()
self.decode = decode
self.nms = nms
def __call__(self, fcos_head_outs, scale_factor):
"""
Decode the bbox and do NMS in FCOS.
"""
locations, cls_logits, bboxes_reg, centerness = fcos_head_outs
bboxes, score = self.decode(locations, cls_logits, bboxes_reg,
centerness, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score)
return bbox_pred, bbox_num
@register
class S2ANetBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['nms']
def __init__(self, num_classes=15, nms_pre=2000, min_bbox_size=0, nms=None):
super(S2ANetBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.nms_pre = paddle.to_tensor(nms_pre)
self.min_bbox_size = min_bbox_size
self.nms = nms
self.origin_shape_list = []
self.fake_pred_cls_score_bbox = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, pred_scores, pred_bboxes):
"""
pred_scores : [N, M] score
pred_bboxes : [N, 5] xc, yc, w, h, a
im_shape : [N, 2] im_shape
scale_factor : [N, 2] scale_factor
"""
pred_ploys0 = rbox2poly(pred_bboxes)
pred_ploys = paddle.unsqueeze(pred_ploys0, axis=0)
# pred_scores [NA, 16] --> [16, NA]
pred_scores0 = paddle.transpose(pred_scores, [1, 0])
pred_scores = paddle.unsqueeze(pred_scores0, axis=0)
pred_cls_score_bbox, bbox_num, _ = self.nms(pred_ploys, pred_scores,
self.num_classes)
# Prevent empty bbox_pred from decode or NMS.
# Bboxes and score before NMS may be empty due to the score threshold.
if pred_cls_score_bbox.shape[0] <= 0 or pred_cls_score_bbox.shape[
1] <= 1:
pred_cls_score_bbox = self.fake_pred_cls_score_bbox
bbox_num = self.fake_bbox_num
pred_cls_score_bbox = paddle.reshape(pred_cls_score_bbox, [-1, 10])
return pred_cls_score_bbox, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Args:
bboxes(Tensor): bboxes [N, 10]
bbox_num(Tensor): bbox_num
im_shape(Tensor): [1 2]
scale_factor(Tensor): [1 2]
Returns:
bbox_pred(Tensor): The output is the prediction with shape [N, 8]
                               including labels, scores and bboxes. The bbox sizes
                               correspond to the original image.
"""
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([
scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x,
scale_y
])
expand_scale = paddle.expand(scale, [bbox_num[i], 8])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 10], label, score, bbox
pred_label_score = bboxes[:, 0:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
pred_bbox = pred_bbox.reshape([-1, 8])
scaled_bbox = pred_bbox / scale_factor_list
origin_h = origin_shape_list[:, 0]
origin_w = origin_shape_list[:, 1]
bboxes = scaled_bbox
zeros = paddle.zeros_like(origin_h)
x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros)
y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros)
x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros)
y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros)
x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros)
y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros)
x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros)
y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1)
pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1)
return pred_result
@register
class JDEBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=1, decode=None, nms=None, return_idx=True):
super(JDEBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.return_idx = return_idx
self.fake_bbox_pred = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
self.fake_nms_keep_idx = paddle.to_tensor(
np.array(
[[0]], dtype='int32'))
self.fake_yolo_boxes_out = paddle.to_tensor(
np.array(
[[[0.0, 0.0, 0.0, 0.0]]], dtype='float32'))
self.fake_yolo_scores_out = paddle.to_tensor(
np.array(
[[[0.0]]], dtype='float32'))
self.fake_boxes_idx = paddle.to_tensor(np.array([[0]], dtype='int64'))
def forward(self, head_out, anchors):
"""
Decode the bbox and do NMS for JDE model.
Args:
head_out (list): Bbox_pred and cls_prob of bbox_head output.
anchors (list): Anchors of JDE model.
Returns:
boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'.
bbox_pred (Tensor): The output is the prediction with shape [N, 6]
including labels, scores and bboxes.
            bbox_num (Tensor): The number of predictions in each batch, with shape [N].
nms_keep_idx (Tensor): The index of kept bboxes after NMS.
"""
boxes_idx, yolo_boxes_scores = self.decode(head_out, anchors)
if len(boxes_idx) == 0:
boxes_idx = self.fake_boxes_idx
yolo_boxes_out = self.fake_yolo_boxes_out
yolo_scores_out = self.fake_yolo_scores_out
else:
yolo_boxes = paddle.gather_nd(yolo_boxes_scores, boxes_idx)
# TODO: only support bs=1 now
yolo_boxes_out = paddle.reshape(
yolo_boxes[:, :4], shape=[1, len(boxes_idx), 4])
yolo_scores_out = paddle.reshape(
yolo_boxes[:, 4:5], shape=[1, 1, len(boxes_idx)])
boxes_idx = boxes_idx[:, 1:]
if self.return_idx:
bbox_pred, bbox_num, nms_keep_idx = self.nms(
yolo_boxes_out, yolo_scores_out, self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
nms_keep_idx = self.fake_nms_keep_idx
return boxes_idx, bbox_pred, bbox_num, nms_keep_idx
else:
bbox_pred, bbox_num, _ = self.nms(yolo_boxes_out, yolo_scores_out,
self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
return _, bbox_pred, bbox_num, _
@register
class CenterNetPostProcess(TTFBox):
"""
Postprocess the model outputs to get final prediction:
1. Do NMS for heatmap to get top `max_per_img` bboxes.
2. Decode bboxes using center offset and box size.
        3. Rescale the decoded bboxes back to the original image shape.
    Args:
        max_per_img (int): the maximum number of predicted objects in an image,
            500 by default.
        down_ratio (int): the down ratio from images to heatmap, 4 by default.
        regress_ltrb (bool): whether to regress left/top/right/bottom or
            width/height for a box, True by default.
        for_mot (bool): whether to return other features used in the tracking model.
"""
__shared__ = ['down_ratio', 'for_mot']
def __init__(self,
max_per_img=500,
down_ratio=4,
regress_ltrb=True,
for_mot=False):
super(TTFBox, self).__init__()
self.max_per_img = max_per_img
self.down_ratio = down_ratio
self.regress_ltrb = regress_ltrb
self.for_mot = for_mot
def __call__(self, hm, wh, reg, im_shape, scale_factor):
heat = self._simple_nms(hm)
scores, inds, topk_clses, ys, xs = self._topk(heat)
scores = paddle.tensor.unsqueeze(scores, [1])
clses = paddle.tensor.unsqueeze(topk_clses, [1])
reg_t = paddle.transpose(reg, [0, 2, 3, 1])
# Like TTFBox, batch size is 1.
# TODO: support batch size > 1
reg = paddle.reshape(reg_t, [-1, paddle.shape(reg_t)[-1]])
reg = paddle.gather(reg, inds)
xs = paddle.cast(xs, 'float32')
ys = paddle.cast(ys, 'float32')
xs = xs + reg[:, 0:1]
ys = ys + reg[:, 1:2]
wh_t = paddle.transpose(wh, [0, 2, 3, 1])
wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
wh = paddle.gather(wh, inds)
if self.regress_ltrb:
x1 = xs - wh[:, 0:1]
y1 = ys - wh[:, 1:2]
x2 = xs + wh[:, 2:3]
y2 = ys + wh[:, 3:4]
else:
x1 = xs - wh[:, 0:1] / 2
y1 = ys - wh[:, 1:2] / 2
x2 = xs + wh[:, 0:1] / 2
y2 = ys + wh[:, 1:2] / 2
n, c, feat_h, feat_w = hm.shape[:]
padw = (feat_w * self.down_ratio - im_shape[0, 1]) / 2
padh = (feat_h * self.down_ratio - im_shape[0, 0]) / 2
x1 = x1 * self.down_ratio
y1 = y1 * self.down_ratio
x2 = x2 * self.down_ratio
y2 = y2 * self.down_ratio
x1 = x1 - padw
y1 = y1 - padh
x2 = x2 - padw
y2 = y2 - padh
bboxes = paddle.concat([x1, y1, x2, y2], axis=1)
scale_y = scale_factor[:, 0:1]
scale_x = scale_factor[:, 1:2]
scale_expand = paddle.concat(
[scale_x, scale_y, scale_x, scale_y], axis=1)
boxes_shape = paddle.shape(bboxes)
boxes_shape.stop_gradient = True
scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
bboxes = paddle.divide(bboxes, scale_expand)
if self.for_mot:
results = paddle.concat([bboxes, scores, clses], axis=1)
return results, inds, topk_clses
else:
results = paddle.concat([clses, scores, bboxes], axis=1)
return results, paddle.shape(results)[0:1], topk_clses
@register
class DETRBBoxPostProcess(object):
__shared__ = ['num_classes', 'use_focal_loss']
__inject__ = []
def __init__(self,
num_classes=80,
num_top_queries=100,
use_focal_loss=False):
super(DETRBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.num_top_queries = num_top_queries
self.use_focal_loss = use_focal_loss
# MASKED: __call__ function (lines 515-570)
@register
class SparsePostProcess(object):
__shared__ = ['num_classes']
def __init__(self, num_proposals, num_classes=80):
super(SparsePostProcess, self).__init__()
self.num_classes = num_classes
self.num_proposals = num_proposals
def __call__(self, box_cls, box_pred, scale_factor_wh, img_whwh):
"""
Arguments:
box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).
The tensor predicts the classification probability for each proposal.
box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).
The tensor predicts 4-vector (x,y,w,h) box
regression values for every proposal
            scale_factor_wh (Tensor): tensor of shape [batch_size, 2], the (w, h) scale factor of each image.
            img_whwh (Tensor): tensor of shape [batch_size, 4].
        Returns:
            bbox_pred (Tensor): tensor of shape [num_boxes, 6]. Each row has 6 values:
                [label, confidence, xmin, ymin, xmax, ymax].
            bbox_num (Tensor): tensor of shape [batch_size], the number of RoIs in each image.
"""
assert len(box_cls) == len(scale_factor_wh) == len(img_whwh)
img_wh = img_whwh[:, :2]
scores = F.sigmoid(box_cls)
labels = paddle.arange(0, self.num_classes). \
unsqueeze(0).tile([self.num_proposals, 1]).flatten(start_axis=0, stop_axis=1)
classes_all = []
scores_all = []
boxes_all = []
for i, (scores_per_image,
box_pred_per_image) in enumerate(zip(scores, box_pred)):
scores_per_image, topk_indices = scores_per_image.flatten(
0, 1).topk(
self.num_proposals, sorted=False)
labels_per_image = paddle.gather(labels, topk_indices, axis=0)
box_pred_per_image = box_pred_per_image.reshape([-1, 1, 4]).tile(
[1, self.num_classes, 1]).reshape([-1, 4])
box_pred_per_image = paddle.gather(
box_pred_per_image, topk_indices, axis=0)
classes_all.append(labels_per_image)
scores_all.append(scores_per_image)
boxes_all.append(box_pred_per_image)
bbox_num = paddle.zeros([len(scale_factor_wh)], dtype="int32")
boxes_final = []
for i in range(len(scale_factor_wh)):
classes = classes_all[i]
boxes = boxes_all[i]
scores = scores_all[i]
boxes[:, 0::2] = paddle.clip(
boxes[:, 0::2], min=0, max=img_wh[i][0]) / scale_factor_wh[i][0]
boxes[:, 1::2] = paddle.clip(
boxes[:, 1::2], min=0, max=img_wh[i][1]) / scale_factor_wh[i][1]
boxes_w, boxes_h = (boxes[:, 2] - boxes[:, 0]).numpy(), (
boxes[:, 3] - boxes[:, 1]).numpy()
keep = (boxes_w > 1.) & (boxes_h > 1.)
if (keep.sum() == 0):
bboxes = paddle.zeros([1, 6]).astype("float32")
else:
boxes = paddle.to_tensor(boxes.numpy()[keep]).astype("float32")
classes = paddle.to_tensor(classes.numpy()[keep]).astype(
"float32").unsqueeze(-1)
scores = paddle.to_tensor(scores.numpy()[keep]).astype(
"float32").unsqueeze(-1)
bboxes = paddle.concat([classes, scores, boxes], axis=-1)
boxes_final.append(bboxes)
bbox_num[i] = bboxes.shape[0]
bbox_pred = paddle.concat(boxes_final)
return bbox_pred, bbox_num
def nms(dets, thresh):
"""Apply classic DPM-style greedy NMS."""
if dets.shape[0] == 0:
return dets[[], :]
scores = dets[:, 0]
x1 = dets[:, 1]
y1 = dets[:, 2]
x2 = dets[:, 3]
y2 = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
ndets = dets.shape[0]
    suppressed = np.zeros((ndets), dtype=np.int32)
# nominal indices
# _i, _j
# sorted indices
# i, j
# temp variables for box i's (the box currently under consideration)
# ix1, iy1, ix2, iy2, iarea
# variables for computing overlap with box j (lower scoring box)
# xx1, yy1, xx2, yy2
# w, h
# inter, ovr
for _i in range(ndets):
i = order[_i]
if suppressed[i] == 1:
continue
ix1 = x1[i]
iy1 = y1[i]
ix2 = x2[i]
iy2 = y2[i]
iarea = areas[i]
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
xx1 = max(ix1, x1[j])
yy1 = max(iy1, y1[j])
xx2 = min(ix2, x2[j])
yy2 = min(iy2, y2[j])
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (iarea + areas[j] - inter)
if ovr >= thresh:
suppressed[j] = 1
keep = np.where(suppressed == 0)[0]
dets = dets[keep, :]
return dets
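# ---------------------------------------------------------------------------
# Illustration only (not part of the original module): the flatten/top-k index
# trick that DETRBBoxPostProcess uses in its focal-loss branch (class id via
# modulo, query id via integer division), reproduced with plain numpy for a
# single image with made-up sizes.
if __name__ == '__main__':
    num_queries, num_classes, k = 5, 3, 4
    rng = np.random.default_rng(0)
    scores = rng.random((num_queries, num_classes))
    flat_idx = np.argsort(scores.reshape(-1))[::-1][:k]  # top-k over (query, class) pairs
    labels = flat_idx % num_classes                      # recover the class id
    query_idx = flat_idx // num_classes                  # recover the query id
    assert np.allclose(scores.reshape(-1)[flat_idx], scores[query_idx, labels])
    print(labels, query_idx)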
|
def __call__(self, head_out, im_shape, scale_factor):
"""
Decode the bbox.
Args:
head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
                labels, scores and bboxes. The bbox sizes correspond to the
                input image, and the bboxes may be used in another branch.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [bs], and is N.
"""
bboxes, logits, masks = head_out
bbox_pred = bbox_cxcywh_to_xyxy(bboxes)
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
img_h, img_w = origin_shape.unbind(1)
origin_shape = paddle.stack(
[img_w, img_h, img_w, img_h], axis=-1).unsqueeze(0)
bbox_pred *= origin_shape
scores = F.sigmoid(logits) if self.use_focal_loss else F.softmax(
logits)[:, :, :-1]
if not self.use_focal_loss:
scores, labels = scores.max(-1), scores.argmax(-1)
if scores.shape[1] > self.num_top_queries:
scores, index = paddle.topk(
scores, self.num_top_queries, axis=-1)
labels = paddle.stack(
[paddle.gather(l, i) for l, i in zip(labels, index)])
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
else:
scores, index = paddle.topk(
scores.reshape([logits.shape[0], -1]),
self.num_top_queries,
axis=-1)
labels = index % logits.shape[2]
index = index // logits.shape[2]
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
bbox_pred = paddle.concat(
[
labels.unsqueeze(-1).astype('float32'), scores.unsqueeze(-1),
bbox_pred
],
axis=-1)
bbox_num = paddle.to_tensor(
bbox_pred.shape[1], dtype='int32').tile([bbox_pred.shape[0]])
bbox_pred = bbox_pred.reshape([-1, 6])
return bbox_pred, bbox_num
| 515 | 570 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ppdet.modeling.bbox_utils import nonempty_bbox, rbox2poly
from ppdet.modeling.layers import TTFBox
from .transformers import bbox_cxcywh_to_xyxy
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
__all__ = [
'BBoxPostProcess', 'MaskPostProcess', 'FCOSPostProcess',
'S2ANetBBoxPostProcess', 'JDEBBoxPostProcess', 'CenterNetPostProcess',
'DETRBBoxPostProcess', 'SparsePostProcess'
]
@register
class BBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=80, decode=None, nms=None):
super(BBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.fake_bboxes = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, head_out, rois, im_shape, scale_factor):
"""
Decode the bbox and do NMS if needed.
Args:
head_out (tuple): bbox_pred and cls_prob of bbox_head output.
rois (tuple): roi and rois_num of rpn_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
                labels, scores and bboxes. The bbox sizes correspond to the
                input image, and the bboxes may be used in another branch.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
"""
if self.nms is not None:
bboxes, score = self.decode(head_out, rois, im_shape, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score, self.num_classes)
else:
bbox_pred, bbox_num = self.decode(head_out, rois, im_shape,
scale_factor)
return bbox_pred, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Notes:
Currently only support bs = 1.
Args:
bboxes (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
pred_result (Tensor): The final prediction results with shape [N, 6]
including labels, scores and bboxes.
"""
if bboxes.shape[0] == 0:
bboxes = self.fake_bboxes
bbox_num = self.fake_bbox_num
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([scale_x, scale_y, scale_x, scale_y])
expand_scale = paddle.expand(scale, [bbox_num[i], 4])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
self.origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 6], label, score, bbox
pred_label = bboxes[:, 0:1]
pred_score = bboxes[:, 1:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
scaled_bbox = pred_bbox / scale_factor_list
origin_h = self.origin_shape_list[:, 0]
origin_w = self.origin_shape_list[:, 1]
zeros = paddle.zeros_like(origin_h)
# clip bbox to [0, original_size]
x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros)
y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros)
x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros)
y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1)
# filter empty bbox
keep_mask = nonempty_bbox(pred_bbox, return_mask=True)
keep_mask = paddle.unsqueeze(keep_mask, [1])
pred_label = paddle.where(keep_mask, pred_label,
paddle.ones_like(pred_label) * -1)
pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1)
return pred_result
def get_origin_shape(self, ):
return self.origin_shape_list
@register
class MaskPostProcess(object):
"""
refer to:
https://github.com/facebookresearch/detectron2/layers/mask_ops.py
    Get the mask output according to the output of the model
"""
def __init__(self, binary_thresh=0.5):
super(MaskPostProcess, self).__init__()
self.binary_thresh = binary_thresh
def paste_mask(self, masks, boxes, im_h, im_w):
"""
Paste the mask prediction to the original image.
"""
x0, y0, x1, y1 = paddle.split(boxes, 4, axis=1)
masks = paddle.unsqueeze(masks, [0, 1])
img_y = paddle.arange(0, im_h, dtype='float32') + 0.5
img_x = paddle.arange(0, im_w, dtype='float32') + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
img_x = paddle.unsqueeze(img_x, [1])
img_y = paddle.unsqueeze(img_y, [2])
N = boxes.shape[0]
gx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]])
gy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]])
grid = paddle.stack([gx, gy], axis=3)
img_masks = F.grid_sample(masks, grid, align_corners=False)
return img_masks[:, 0]
def __call__(self, mask_out, bboxes, bbox_num, origin_shape):
"""
Decode the mask_out and paste the mask to the origin image.
Args:
mask_out (Tensor): mask_head output with shape [N, 28, 28].
            bboxes (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
origin_shape (Tensor): The origin shape of the input image, the tensor
shape is [N, 2], and each row is [h, w].
Returns:
pred_result (Tensor): The final prediction mask results with shape
[N, h, w] in binary mask style.
"""
num_mask = mask_out.shape[0]
origin_shape = paddle.cast(origin_shape, 'int32')
# TODO: support bs > 1 and mask output dtype is bool
pred_result = paddle.zeros(
[num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')
if bbox_num == 1 and bboxes[0][0] == -1:
return pred_result
# TODO: optimize chunk paste
pred_result = []
for i in range(bboxes.shape[0]):
im_h, im_w = origin_shape[i][0], origin_shape[i][1]
pred_mask = self.paste_mask(mask_out[i], bboxes[i:i + 1, 2:], im_h,
im_w)
pred_mask = pred_mask >= self.binary_thresh
pred_mask = paddle.cast(pred_mask, 'int32')
pred_result.append(pred_mask)
pred_result = paddle.concat(pred_result)
return pred_result
@register
class FCOSPostProcess(object):
__inject__ = ['decode', 'nms']
def __init__(self, decode=None, nms=None):
super(FCOSPostProcess, self).__init__()
self.decode = decode
self.nms = nms
def __call__(self, fcos_head_outs, scale_factor):
"""
Decode the bbox and do NMS in FCOS.
"""
locations, cls_logits, bboxes_reg, centerness = fcos_head_outs
bboxes, score = self.decode(locations, cls_logits, bboxes_reg,
centerness, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score)
return bbox_pred, bbox_num
@register
class S2ANetBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['nms']
def __init__(self, num_classes=15, nms_pre=2000, min_bbox_size=0, nms=None):
super(S2ANetBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.nms_pre = paddle.to_tensor(nms_pre)
self.min_bbox_size = min_bbox_size
self.nms = nms
self.origin_shape_list = []
self.fake_pred_cls_score_bbox = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, pred_scores, pred_bboxes):
"""
pred_scores : [N, M] score
pred_bboxes : [N, 5] xc, yc, w, h, a
im_shape : [N, 2] im_shape
scale_factor : [N, 2] scale_factor
"""
pred_ploys0 = rbox2poly(pred_bboxes)
pred_ploys = paddle.unsqueeze(pred_ploys0, axis=0)
# pred_scores [NA, 16] --> [16, NA]
pred_scores0 = paddle.transpose(pred_scores, [1, 0])
pred_scores = paddle.unsqueeze(pred_scores0, axis=0)
pred_cls_score_bbox, bbox_num, _ = self.nms(pred_ploys, pred_scores,
self.num_classes)
# Prevent empty bbox_pred from decode or NMS.
# Bboxes and score before NMS may be empty due to the score threshold.
if pred_cls_score_bbox.shape[0] <= 0 or pred_cls_score_bbox.shape[
1] <= 1:
pred_cls_score_bbox = self.fake_pred_cls_score_bbox
bbox_num = self.fake_bbox_num
pred_cls_score_bbox = paddle.reshape(pred_cls_score_bbox, [-1, 10])
return pred_cls_score_bbox, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Args:
bboxes(Tensor): bboxes [N, 10]
bbox_num(Tensor): bbox_num
im_shape(Tensor): [1 2]
scale_factor(Tensor): [1 2]
Returns:
bbox_pred(Tensor): The output is the prediction with shape [N, 8]
including labels, scores and bboxes. The size of
bboxes are corresponding to the original image.
"""
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([
scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x,
scale_y
])
expand_scale = paddle.expand(scale, [bbox_num[i], 8])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 10], label, score, bbox
pred_label_score = bboxes[:, 0:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
pred_bbox = pred_bbox.reshape([-1, 8])
scaled_bbox = pred_bbox / scale_factor_list
origin_h = origin_shape_list[:, 0]
origin_w = origin_shape_list[:, 1]
bboxes = scaled_bbox
zeros = paddle.zeros_like(origin_h)
x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros)
y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros)
x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros)
y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros)
x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros)
y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros)
x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros)
y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1)
pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1)
return pred_result
@register
class JDEBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=1, decode=None, nms=None, return_idx=True):
super(JDEBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.return_idx = return_idx
self.fake_bbox_pred = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
self.fake_nms_keep_idx = paddle.to_tensor(
np.array(
[[0]], dtype='int32'))
self.fake_yolo_boxes_out = paddle.to_tensor(
np.array(
[[[0.0, 0.0, 0.0, 0.0]]], dtype='float32'))
self.fake_yolo_scores_out = paddle.to_tensor(
np.array(
[[[0.0]]], dtype='float32'))
self.fake_boxes_idx = paddle.to_tensor(np.array([[0]], dtype='int64'))
def forward(self, head_out, anchors):
"""
Decode the bbox and do NMS for JDE model.
Args:
head_out (list): Bbox_pred and cls_prob of bbox_head output.
anchors (list): Anchors of JDE model.
Returns:
boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'.
bbox_pred (Tensor): The output is the prediction with shape [N, 6]
including labels, scores and bboxes.
            bbox_num (Tensor): The number of predictions of each batch with shape [N].
nms_keep_idx (Tensor): The index of kept bboxes after NMS.
"""
boxes_idx, yolo_boxes_scores = self.decode(head_out, anchors)
if len(boxes_idx) == 0:
boxes_idx = self.fake_boxes_idx
yolo_boxes_out = self.fake_yolo_boxes_out
yolo_scores_out = self.fake_yolo_scores_out
else:
yolo_boxes = paddle.gather_nd(yolo_boxes_scores, boxes_idx)
# TODO: only support bs=1 now
yolo_boxes_out = paddle.reshape(
yolo_boxes[:, :4], shape=[1, len(boxes_idx), 4])
yolo_scores_out = paddle.reshape(
yolo_boxes[:, 4:5], shape=[1, 1, len(boxes_idx)])
boxes_idx = boxes_idx[:, 1:]
if self.return_idx:
bbox_pred, bbox_num, nms_keep_idx = self.nms(
yolo_boxes_out, yolo_scores_out, self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
nms_keep_idx = self.fake_nms_keep_idx
return boxes_idx, bbox_pred, bbox_num, nms_keep_idx
else:
bbox_pred, bbox_num, _ = self.nms(yolo_boxes_out, yolo_scores_out,
self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
return _, bbox_pred, bbox_num, _
@register
class CenterNetPostProcess(TTFBox):
"""
Postprocess the model outputs to get final prediction:
1. Do NMS for heatmap to get top `max_per_img` bboxes.
2. Decode bboxes using center offset and box size.
    3. Rescale the decoded bboxes with reference to the original image shape.
Args:
        max_per_img(int): the maximum number of predicted objects in an image,
500 by default.
down_ratio(int): the down ratio from images to heatmap, 4 by default.
regress_ltrb (bool): whether to regress left/top/right/bottom or
width/height for a box, true by default.
        for_mot (bool): whether to return other features used in the tracking model.
"""
__shared__ = ['down_ratio', 'for_mot']
def __init__(self,
max_per_img=500,
down_ratio=4,
regress_ltrb=True,
for_mot=False):
super(TTFBox, self).__init__()
self.max_per_img = max_per_img
self.down_ratio = down_ratio
self.regress_ltrb = regress_ltrb
self.for_mot = for_mot
def __call__(self, hm, wh, reg, im_shape, scale_factor):
heat = self._simple_nms(hm)
scores, inds, topk_clses, ys, xs = self._topk(heat)
scores = paddle.tensor.unsqueeze(scores, [1])
clses = paddle.tensor.unsqueeze(topk_clses, [1])
reg_t = paddle.transpose(reg, [0, 2, 3, 1])
# Like TTFBox, batch size is 1.
# TODO: support batch size > 1
reg = paddle.reshape(reg_t, [-1, paddle.shape(reg_t)[-1]])
reg = paddle.gather(reg, inds)
xs = paddle.cast(xs, 'float32')
ys = paddle.cast(ys, 'float32')
xs = xs + reg[:, 0:1]
ys = ys + reg[:, 1:2]
wh_t = paddle.transpose(wh, [0, 2, 3, 1])
wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
wh = paddle.gather(wh, inds)
if self.regress_ltrb:
x1 = xs - wh[:, 0:1]
y1 = ys - wh[:, 1:2]
x2 = xs + wh[:, 2:3]
y2 = ys + wh[:, 3:4]
else:
x1 = xs - wh[:, 0:1] / 2
y1 = ys - wh[:, 1:2] / 2
x2 = xs + wh[:, 0:1] / 2
y2 = ys + wh[:, 1:2] / 2
n, c, feat_h, feat_w = hm.shape[:]
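        # The network input was padded out to feat_{w,h} * down_ratio, so half
        # of the total padding is subtracted from every coordinate once the
        # boxes have been scaled back to the input resolution.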
padw = (feat_w * self.down_ratio - im_shape[0, 1]) / 2
padh = (feat_h * self.down_ratio - im_shape[0, 0]) / 2
x1 = x1 * self.down_ratio
y1 = y1 * self.down_ratio
x2 = x2 * self.down_ratio
y2 = y2 * self.down_ratio
x1 = x1 - padw
y1 = y1 - padh
x2 = x2 - padw
y2 = y2 - padh
bboxes = paddle.concat([x1, y1, x2, y2], axis=1)
scale_y = scale_factor[:, 0:1]
scale_x = scale_factor[:, 1:2]
scale_expand = paddle.concat(
[scale_x, scale_y, scale_x, scale_y], axis=1)
boxes_shape = paddle.shape(bboxes)
boxes_shape.stop_gradient = True
scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
bboxes = paddle.divide(bboxes, scale_expand)
if self.for_mot:
results = paddle.concat([bboxes, scores, clses], axis=1)
return results, inds, topk_clses
else:
results = paddle.concat([clses, scores, bboxes], axis=1)
return results, paddle.shape(results)[0:1], topk_clses
@register
class DETRBBoxPostProcess(object):
__shared__ = ['num_classes', 'use_focal_loss']
__inject__ = []
def __init__(self,
num_classes=80,
num_top_queries=100,
use_focal_loss=False):
super(DETRBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.num_top_queries = num_top_queries
self.use_focal_loss = use_focal_loss
def __call__(self, head_out, im_shape, scale_factor):
"""
Decode the bbox.
Args:
head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
labels, scores and bboxes. The size of bboxes are corresponding
to the input image, the bboxes may be used in other branch.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [bs], and is N.
"""
bboxes, logits, masks = head_out
bbox_pred = bbox_cxcywh_to_xyxy(bboxes)
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
img_h, img_w = origin_shape.unbind(1)
origin_shape = paddle.stack(
[img_w, img_h, img_w, img_h], axis=-1).unsqueeze(0)
bbox_pred *= origin_shape
scores = F.sigmoid(logits) if self.use_focal_loss else F.softmax(
logits)[:, :, :-1]
if not self.use_focal_loss:
scores, labels = scores.max(-1), scores.argmax(-1)
if scores.shape[1] > self.num_top_queries:
scores, index = paddle.topk(
scores, self.num_top_queries, axis=-1)
labels = paddle.stack(
[paddle.gather(l, i) for l, i in zip(labels, index)])
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
else:
scores, index = paddle.topk(
scores.reshape([logits.shape[0], -1]),
self.num_top_queries,
axis=-1)
labels = index % logits.shape[2]
index = index // logits.shape[2]
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
bbox_pred = paddle.concat(
[
labels.unsqueeze(-1).astype('float32'), scores.unsqueeze(-1),
bbox_pred
],
axis=-1)
bbox_num = paddle.to_tensor(
bbox_pred.shape[1], dtype='int32').tile([bbox_pred.shape[0]])
bbox_pred = bbox_pred.reshape([-1, 6])
return bbox_pred, bbox_num
@register
class SparsePostProcess(object):
__shared__ = ['num_classes']
def __init__(self, num_proposals, num_classes=80):
super(SparsePostProcess, self).__init__()
self.num_classes = num_classes
self.num_proposals = num_proposals
def __call__(self, box_cls, box_pred, scale_factor_wh, img_whwh):
"""
Arguments:
box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).
The tensor predicts the classification probability for each proposal.
box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).
The tensor predicts 4-vector (x,y,w,h) box
regression values for every proposal
            scale_factor_wh (Tensor): tensor of shape [batch_size, 2], the per-image (w, h) scale factor
img_whwh (Tensor): tensors of shape [batch_size, 4]
Returns:
bbox_pred (Tensor): tensors of shape [num_boxes, 6] Each row has 6 values:
[label, confidence, xmin, ymin, xmax, ymax]
bbox_num (Tensor): tensors of shape [batch_size] the number of RoIs in each image.
"""
assert len(box_cls) == len(scale_factor_wh) == len(img_whwh)
img_wh = img_whwh[:, :2]
scores = F.sigmoid(box_cls)
labels = paddle.arange(0, self.num_classes). \
unsqueeze(0).tile([self.num_proposals, 1]).flatten(start_axis=0, stop_axis=1)
classes_all = []
scores_all = []
boxes_all = []
for i, (scores_per_image,
box_pred_per_image) in enumerate(zip(scores, box_pred)):
scores_per_image, topk_indices = scores_per_image.flatten(
0, 1).topk(
self.num_proposals, sorted=False)
labels_per_image = paddle.gather(labels, topk_indices, axis=0)
box_pred_per_image = box_pred_per_image.reshape([-1, 1, 4]).tile(
[1, self.num_classes, 1]).reshape([-1, 4])
box_pred_per_image = paddle.gather(
box_pred_per_image, topk_indices, axis=0)
classes_all.append(labels_per_image)
scores_all.append(scores_per_image)
boxes_all.append(box_pred_per_image)
bbox_num = paddle.zeros([len(scale_factor_wh)], dtype="int32")
boxes_final = []
for i in range(len(scale_factor_wh)):
classes = classes_all[i]
boxes = boxes_all[i]
scores = scores_all[i]
boxes[:, 0::2] = paddle.clip(
boxes[:, 0::2], min=0, max=img_wh[i][0]) / scale_factor_wh[i][0]
boxes[:, 1::2] = paddle.clip(
boxes[:, 1::2], min=0, max=img_wh[i][1]) / scale_factor_wh[i][1]
boxes_w, boxes_h = (boxes[:, 2] - boxes[:, 0]).numpy(), (
boxes[:, 3] - boxes[:, 1]).numpy()
keep = (boxes_w > 1.) & (boxes_h > 1.)
if (keep.sum() == 0):
bboxes = paddle.zeros([1, 6]).astype("float32")
else:
boxes = paddle.to_tensor(boxes.numpy()[keep]).astype("float32")
classes = paddle.to_tensor(classes.numpy()[keep]).astype(
"float32").unsqueeze(-1)
scores = paddle.to_tensor(scores.numpy()[keep]).astype(
"float32").unsqueeze(-1)
bboxes = paddle.concat([classes, scores, boxes], axis=-1)
boxes_final.append(bboxes)
bbox_num[i] = bboxes.shape[0]
bbox_pred = paddle.concat(boxes_final)
return bbox_pred, bbox_num
def nms(dets, thresh):
"""Apply classic DPM-style greedy NMS."""
if dets.shape[0] == 0:
return dets[[], :]
scores = dets[:, 0]
x1 = dets[:, 1]
y1 = dets[:, 2]
x2 = dets[:, 3]
y2 = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
ndets = dets.shape[0]
    suppressed = np.zeros(ndets, dtype=np.int32)
# nominal indices
# _i, _j
# sorted indices
# i, j
# temp variables for box i's (the box currently under consideration)
# ix1, iy1, ix2, iy2, iarea
# variables for computing overlap with box j (lower scoring box)
# xx1, yy1, xx2, yy2
# w, h
# inter, ovr
for _i in range(ndets):
i = order[_i]
if suppressed[i] == 1:
continue
ix1 = x1[i]
iy1 = y1[i]
ix2 = x2[i]
iy2 = y2[i]
iarea = areas[i]
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
xx1 = max(ix1, x1[j])
yy1 = max(iy1, y1[j])
xx2 = min(ix2, x2[j])
yy2 = min(iy2, y2[j])
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (iarea + areas[j] - inter)
if ovr >= thresh:
suppressed[j] = 1
keep = np.where(suppressed == 0)[0]
dets = dets[keep, :]
return dets
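if __name__ == '__main__':
    # Minimal usage sketch of the greedy NMS helper above (illustrative only,
    # not part of the original module). Rows of `dets` are
    # [score, x1, y1, x2, y2]; with an IoU threshold of 0.5 the lower-scoring
    # of the two overlapping boxes is suppressed and the isolated box is kept.
    example_dets = np.array(
        [[0.9, 10., 10., 110., 110.],
         [0.8, 12., 12., 112., 112.],
         [0.7, 200., 200., 260., 260.]], dtype='float32')
    print(nms(example_dets, 0.5))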
|
authenticated_view
|
This view can be used to test requests with an authenticated user. Create a
user with a default username, save it and then use this user to log in.
Always returns a 200.
|
from django.conf.urls import url
from django.contrib.auth import login
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.decorators.cache import cache_page
from django.urls import include, path, re_path
from .. import views
def repath_view(request):
return HttpResponse(status=200)
def path_view(request):
return HttpResponse(status=200)
# MASKED: authenticated_view function (lines 19-29)
urlpatterns = [
url(r"^$", views.index),
url(r"^simple/$", views.BasicView.as_view()),
url(r"^users/$", views.UserList.as_view(), name="users-list"),
url(r"^cached-template/$", views.TemplateCachedUserList.as_view(), name="cached-template-list"),
url(r"^cached-users/$", cache_page(60)(views.UserList.as_view()), name="cached-users-list"),
url(r"^fail-view/$", views.ForbiddenView.as_view(), name="forbidden-view"),
url(r"^authenticated/$", authenticated_view, name="authenticated-view"),
url(r"^static-method-view/$", views.StaticMethodView.as_view(), name="static-method-view"),
url(r"^fn-view/$", views.function_view, name="fn-view"),
url(r"^feed-view/$", views.FeedView(), name="feed-view"),
url(r"^partial-view/$", views.partial_view, name="partial-view"),
url(r"^lambda-view/$", views.lambda_view, name="lambda-view"),
url(r"^error-500/$", views.error_500, name="error-500"),
re_path(r"re-path.*/", repath_view),
path("path/", path_view),
path("include/", include("tests.contrib.django.django_app.extra_urls")),
]
|
def authenticated_view(request):
"""
This view can be used to test requests with an authenticated user. Create a
user with a default username, save it and then use this user to log in.
Always returns a 200.
"""
user = User(username="Jane Doe")
user.save()
login(request, user)
return HttpResponse(status=200)
| 19 | 29 |
from django.conf.urls import url
from django.contrib.auth import login
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.decorators.cache import cache_page
from django.urls import include, path, re_path
from .. import views
def repath_view(request):
return HttpResponse(status=200)
def path_view(request):
return HttpResponse(status=200)
def authenticated_view(request):
"""
This view can be used to test requests with an authenticated user. Create a
user with a default username, save it and then use this user to log in.
Always returns a 200.
"""
user = User(username="Jane Doe")
user.save()
login(request, user)
return HttpResponse(status=200)
urlpatterns = [
url(r"^$", views.index),
url(r"^simple/$", views.BasicView.as_view()),
url(r"^users/$", views.UserList.as_view(), name="users-list"),
url(r"^cached-template/$", views.TemplateCachedUserList.as_view(), name="cached-template-list"),
url(r"^cached-users/$", cache_page(60)(views.UserList.as_view()), name="cached-users-list"),
url(r"^fail-view/$", views.ForbiddenView.as_view(), name="forbidden-view"),
url(r"^authenticated/$", authenticated_view, name="authenticated-view"),
url(r"^static-method-view/$", views.StaticMethodView.as_view(), name="static-method-view"),
url(r"^fn-view/$", views.function_view, name="fn-view"),
url(r"^feed-view/$", views.FeedView(), name="feed-view"),
url(r"^partial-view/$", views.partial_view, name="partial-view"),
url(r"^lambda-view/$", views.lambda_view, name="lambda-view"),
url(r"^error-500/$", views.error_500, name="error-500"),
re_path(r"re-path.*/", repath_view),
path("path/", path_view),
path("include/", include("tests.contrib.django.django_app.extra_urls")),
]
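# A minimal usage sketch (illustrative, not part of the original URLconf):
# with this module installed as the ROOT_URLCONF, the authenticated view can
# be exercised through Django's test client, e.g.
#   from django.test import Client
#   assert Client().get("/authenticated/").status_code == 200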
|
_handle_mark_groups_arg_for_clustering
|
Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self._graph, self)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
        The giant community is a community for which no larger community exists.
        @note: there can be multiple giant communities; this method returns a
        copy of an arbitrary one of them.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}.
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
        will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly; just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. Exactly one
block should have no parent: the root block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
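# Each (parent, child) index pair becomes a directed edge pointing from the
# containing block to the contained one; the root block (whose parent is
# None) contributes no edge, so the result is a directed tree.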
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
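# Mark only the blocks whose cohesion is larger than one; each retained
# (block index, cohesion) pair is passed on as a (group, color) pair, so
# the blocks end up colored according to their cohesion values.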
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
# MASKED: _handle_mark_groups_arg_for_clustering function (lines 1469-1517)
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
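# Drop the None entries from both vectors in place: collect the offending
# indices, then (walking them from the back) swap each such entry to the
# shrinking tail of both lists and finally cut the tail off in one step.
# The relative order of the surviving entries may change, but it changes
# identically in both vectors, which is all the comparison measures need.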
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric; that is why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
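As a worked example following the definition above, for the membership
vectors C{[0, 0, 0, 1, 1, 1]} and C{[0, 0, 1, 1, 2, 2]} the projection
distance of the first partition from the second is 6 - (2 + 2) = 2, the
projection distance of the second from the first is 6 - (2 + 1 + 2) = 1,
and the split-join distance is therefore 3.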
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
|
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
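# At this point group_iter yields (group, color) pairs in some form. The
# generator below additionally resolves bare cluster indices to the actual
# member lists of the clustering, so the caller only ever sees explicit
# vertex groups paired with their colors.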
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
| 1,469 | 1,517 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index to which each element of the set belongs.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return an iterator that yields the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
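For instance, reusing the clustering from the class docstring:
>>> cl = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2])
>>> cl.sizes(2, 0)
[4, 4]
>>> cl.sizes()
[4, 3, 4]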
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specifies the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
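# Three cases follow: no intervals (use the raw attribute values as the
# category labels), an explicit list of boundaries (bisect each value into
# its bin), or a single number (fixed-width bins via integer division).
# None attribute values stay None throughout, leaving those vertices
# unclustered.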
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
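# Remap the (possibly arbitrary) category labels to consecutive integers
# starting from zero; pre-seeding the generator with None -> None keeps
# unclustered vertices unclustered after the remapping.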
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertices M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities; in that case, this method
will return a copy of an arbitrary one of them.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
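# For the usual complete merge matrix the last merge joins the two
# remaining groups, the larger ID of which is nitems + nmerges - 2, so
# the number of original items can be recovered from the last row.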
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(idxi)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
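# Walk the merge tree from the topmost (most recent) merge nodes downwards.
# Each merge node pushes its two children onto the stack, so the leaves are
# emitted grouped by subtree -- exactly the left-to-right order in which
# they can be drawn without branches crossing. Nodes already reached
# through a later merge are skipped.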
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leaves and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leaves and branches.
@param max_leaf_count: the maximal number of leaves to print in the
ASCII representation. If the dendrogram has more leaves than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
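# Draw the merges level by level: within one pass only merges whose both
# operands already exist (i.e. have an index below max_community_idx) are
# drawn; the rest are deferred to a later level so that a merge is never
# drawn before the groups it joins.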
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# width, height: size of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
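# Replay the first (vcount - n) merges so that exactly n communities
# remain, then renumber the surviving community IDs (which may be
# arbitrary merge IDs) to consecutive integers starting from zero.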
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide an API similar to that of L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = ((group, color) for color, group in enumerate(self))
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. Exactly one
block should have no parent; that root block covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
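# Illustrative usage (added sketch, not part of the original module): comparing
# two membership vectors. This assumes compare_communities is re-exported by
# the top-level igraph package, as the other clustering helpers are.
#
#   >>> from igraph import compare_communities
#   >>> compare_communities([0, 0, 1, 1], [0, 0, 1, 1], method="vi")
#   0.0
#   >>> compare_communities([0, 0, 1, 1], [1, 1, 0, 0], method="rand")
#   1.0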
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
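# Worked example (added for illustration, not in the original module): take
# comm1 = [0, 0, 1, 1] and comm2 = [0, 0, 0, 0] over n = 4 elements. Projecting
# comm1 onto comm2, both clusters of comm1 are fully contained in the single
# cluster of comm2 (overlaps 2 + 2 = 4), so that projection distance is
# 4 - 4 = 0. Projecting comm2 onto comm1, the best overlap of the single
# cluster is 2, giving 4 - 2 = 2. Per the return-value ordering documented
# above, split_join_distance(comm1, comm2) would therefore yield (0, 2); the
# zero term signals that comm1 is a subpartition of comm2, and the split-join
# distance proper is 0 + 2 = 2.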
|
__init__
|
Constructor.
@param membership: the membership list -- that is, the cluster
index to which each element of the set belongs.
@param params: additional parameters to be stored in this
object's dictionary.
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
# MASKED: __init__ function (lines 83-97)
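# NOTE (added): the constructor is masked above. The sketch below is a
# plausible reconstruction inferred from the docstring fields of this record
# and from the attributes the rest of the class relies on (self._membership
# and self._len); the original implementation may differ in its details.
def __init__(self, membership, params=None):
    """Constructor.
    @param membership: the membership list -- that is, the cluster
      index to which each element of the set belongs.
    @param params: additional parameters to be stored in this
      object's dictionary.
    """
    self._membership = list(membership)
    if self._membership:
        # Tolerate None entries (unclassified elements), which
        # VertexClustering.FromAttribute may produce.
        self._len = max(m for m in self._membership if m is not None) + 1
    else:
        self._len = 0
    if params:
        self.__dict__.update(params)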
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
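# Illustrative sketch (added, not in the original module): binning a numeric
# vertex attribute. The graph and the "age" attribute below are made up.
#
#   >>> g = Graph.Full(4)
#   >>> g.vs["age"] = [5, 15, 25, 35]
#   >>> cl = VertexClustering.FromAttribute(g, "age", intervals=(10, 20, 30))
#   # every vertex falls into a different bin (below 10, [10, 20), [20, 30),
#   # 30 and above), so cl ends up with four non-empty clusters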
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
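# Illustrative sketch (added, not in the original module): contracting a
# two-cluster path graph; the graph below is made up.
#
#   >>> g = Graph(4, [(0, 1), (1, 2), (2, 3)])
#   >>> cl = VertexClustering(g, [0, 0, 1, 1])
#   >>> cg = cl.cluster_graph(combine_edges=False)
#   >>> cg.vcount(), cg.ecount()   # intra-cluster edges become loop edges
#   (2, 3)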
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(idxi)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
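# Quick check (added for illustration) of the merge-matrix format described
# in the class docstring, using the same matrix as the ASCII example there;
# the expected string follows directly from format() above.
#
#   >>> d = Dendrogram([(0, 1), (3, 4), (2, 5), (6, 7)])
#   >>> d.format()
#   '((3,4)6,(2,(0,1)5)7)8;'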
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
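# Illustrative usage (added, not in the original module): cutting a
# dendrogram produced by a community detection method. Assumes the bundled
# Zachary karate club graph and Graph.community_fastgreedy(), which returns
# a VertexDendrogram.
#
#   >>> g = Graph.Famous("Zachary")
#   >>> dendro = g.community_fastgreedy()
#   >>> len(dendro.as_clustering(2))
#   2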
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
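# Minimal usage sketch, not part of the original module, assuming igraph's
# public Graph API; the Zachary karate club graph is just a convenient example.
from igraph import Graph
_g = Graph.Famous("Zachary")
_blocks = _g.cohesive_blocks()
print _blocks.cohesions()             # vertex connectivity of each block
print _blocks.parents()               # parent block indices, None for the root
print _blocks.hierarchy().summary()   # containment tree built by hierarchy()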
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
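# Illustrative sketch, not part of the original module: the accepted
# mark_groups forms below are all normalized to (member list, color) pairs
# by the helper above. The clustering is made up for demonstration.
_cl = Clustering([0, 0, 0, 1, 1, 2, 2])
print list(_handle_mark_groups_arg_for_clustering(True, _cl))
# -> [([0, 1, 2], 0), ([3, 4], 1), ([5, 6], 2)]
print list(_handle_mark_groups_arg_for_clustering([0, 2], _cl))
# -> [([0, 1, 2], 0), ([5, 6], 1)]        # cluster indices resolved, colors assigned
print list(_handle_mark_groups_arg_for_clustering({0: "red"}, _cl))
# -> [([0, 1, 2], "red")]                 # dict keys resolved to member lists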
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
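# Illustrative sketch, not part of the original module: with remove_none=True,
# positions that are None in either vector are dropped from both before the
# comparison (the surviving elements may be reordered by the swap-based removal).
_v1, _v2 = _prepare_community_comparison([0, 0, None, 1], [1, 1, 2, None], remove_none=True)
print _v1, _v2    # [0, 0] [1, 1] -- only the first two positions survive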
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
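# Minimal usage sketch, not part of the original module; the membership
# vectors are made up for illustration.
_m1 = [0, 0, 0, 1, 1, 1]
_m2 = [0, 0, 1, 1, 2, 2]
print compare_communities(_m1, _m2, method="nmi")          # normalized mutual information
print compare_communities(_m1, _m2, method="rand")         # Rand index
print compare_communities(_m1, _m2, method="split-join")   # split-join distance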
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
distance of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
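# Worked example, not part of the original module. comm1 has clusters
# {0,1,2} and {3,4,5}; comm2 has clusters {0,1}, {2,3} and {4,5}. Projecting
# comm1 onto comm2 gives maximal overlaps 2+2=4, hence distance 6-4=2;
# projecting comm2 onto comm1 gives overlaps 2+1+2=5, hence distance 6-5=1.
_c1 = [0, 0, 0, 1, 1, 1]
_c2 = [0, 0, 1, 1, 2, 2]
print split_join_distance(_c1, _c2)   # expected (2, 1); the split-join distance is their sum, 3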
|
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the index of the
cluster to which each element of the set belongs.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
| 83 | 97 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the index of the
cluster to which each element of the set belongs.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
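# Minimal usage sketch, not part of the original module; the graph and its
# "age" attribute are made up to show interval-based binning.
from igraph import Graph
_g = Graph.Full(4)
_g.vs["age"] = [5, 12, 27, 33]
_cl = VertexClustering.FromAttribute(_g, "age", intervals=[10, 20, 30])
print _cl.membership    # one bin per vertex here: <10, [10,20), [20,30), >=30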
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
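# Minimal sketch, not part of the original module: contracting a two-cluster
# partition of a six-vertex ring. With combine_edges=False every original
# edge survives, either as a loop inside a cluster or as a parallel edge
# between the two contracted vertices.
from igraph import Graph
_g = Graph.Ring(6)
_cl = VertexClustering(_g, [0, 0, 0, 1, 1, 1])
_cg = _cl.cluster_graph(combine_edges=False)
print _cg.vcount(), _cg.ecount()    # 2 6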
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
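# Minimal sketch, not part of the original module: the cached modularity is
# not refreshed automatically when the underlying graph changes, so
# recalculate_modularity() has to be called explicitly.
from igraph import Graph
_g = Graph.Ring(6)
_cl = VertexClustering(_g, [0, 0, 0, 1, 1, 1])
print _cl.modularity                  # computed lazily on first access
_g.add_edges([(0, 3)])                # the graph changed behind the clustering's back
print _cl.recalculate_modularity()    # refreshes the cached value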
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
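# Minimal sketch, not part of the original module: the connected components
# of two disjoint rings form a clustering whose giant community is the
# larger ring.
from igraph import Graph
_g = Graph.Ring(5) + Graph.Ring(3)    # disjoint union of a 5-ring and a 3-ring
_cl = _g.clusters()                   # connected components as a VertexClustering
print _cl.giant().vcount()            # 5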
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
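# Minimal sketch, not part of the original module: the merge matrix from the
# five-leaf example in the class docstring, fed straight into the constructor.
_d = Dendrogram([[0, 1], [3, 4], [2, 5], [6, 7]])
print _d.summary(verbosity=1)    # ASCII drawing of the dendrogram above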
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(idxi)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
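# Minimal sketch, not part of the original module: cutting a community
# dendrogram at the optimal level and at an explicitly requested level.
from igraph import Graph
_g = Graph.Famous("Zachary")
_dendro = _g.community_fastgreedy()
print len(_dendro.as_clustering())     # optimal (modularity-maximizing) number of clusters
print len(_dendro.as_clustering(4))    # force exactly four clusters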
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
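# --- Illustrative usage sketch (editorial addition, not part of the original source). ---
# A minimal, hedged example of compare_communities(); the membership lists below are
# made up for illustration.
#
# from igraph import compare_communities
# memb1 = [0, 0, 0, 1, 1, 1]
# memb2 = [0, 0, 1, 1, 2, 2]
# print compare_communities(memb1, memb2, method="vi")    # variation of information
# print compare_communities(memb1, memb2, method="nmi")   # normalized mutual information
# print compare_communities(memb1, memb1, method="rand")  # identical partitions -> 1.0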
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
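# --- Illustrative usage sketch (editorial addition, not part of the original source). ---
# Shows that split_join_distance() returns the two asymmetric projection distances
# separately; memb2 below refines memb1, so one of the two values is expected to be zero.
# The membership lists are made up for illustration.
#
# from igraph import split_join_distance
# memb1 = [0, 0, 0, 0, 1, 1]
# memb2 = [0, 0, 1, 1, 2, 2]
# d12, d21 = split_join_distance(memb1, memb2)
# print d12 + d21          # the actual split-join distance is the sum of the two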
|
sizes
|
Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
# MASKED: sizes function (lines 159-172)
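# --- Editorial note (not part of the original source): the body of the masked sizes()
# method is elided above. Based on its docstring and on the identical Cover.sizes()
# implementation later in this file, it most likely reads as sketched below.
#
# def sizes(self, *args):
#     """Returns the size of given clusters.
#     The indices are given as positional arguments. If there are no
#     positional arguments, the function will return the sizes of all clusters.
#     """
#     if args:
#         return [len(self[idx]) for idx in args]
#     return [len(cluster) for cluster in self]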
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
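# --- Illustrative usage sketch (editorial addition, not part of the original source). ---
# Clusters the vertices of a toy graph by a made-up numeric vertex attribute, once with a
# fixed bin width and once with explicit bin boundaries; the attribute name and values
# are invented for illustration.
#
# from igraph import Graph, VertexClustering
# g = Graph.Ring(6)
# g.vs["age"] = [5, 12, 17, 25, 31, 44]
# by_decade = VertexClustering.FromAttribute(g, "age", intervals=10)
# by_bins = VertexClustering.FromAttribute(g, "age", intervals=[18, 30])
# print by_decade.membership
# print len(by_bins)        # three categories: under 18, 18 to 30, 30 or above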
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
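# --- Illustrative usage sketch (editorial addition, not part of the original source). ---
# Contracts each community of Zachary's karate club into a single vertex. The community
# detection method chosen here (fastgreedy) is just one convenient way to obtain a
# VertexClustering.
#
# from igraph import Graph
# g = Graph.Famous("Zachary")
# cl = g.community_fastgreedy().as_clustering()
# cg = cl.cluster_graph(combine_edges=False)
# print cg.vcount(), cg.ecount()   # one vertex per community; parallel edges kept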
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
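# --- Illustrative usage sketch (editorial addition, not part of the original source). ---
# Shows the modularity property together with crossing(); the clustering is obtained with
# multilevel (Louvain) community detection, which is only one of several possibilities.
#
# from igraph import Graph
# g = Graph.Famous("Zachary")
# cl = g.community_multilevel()
# print cl.q                          # same as cl.modularity
# print sum(cl.crossing())            # number of edges running between communities
# g.add_edges([(0, 33)])              # modify the graph...
# print cl.recalculate_modularity()   # ...then refresh the cached modularity score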
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
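# --- Illustrative usage sketch (editorial addition, not part of the original source). ---
# Extracting per-community subgraphs and the giant (largest) community from a clustering;
# the community detection method is an arbitrary choice for the example.
#
# from igraph import Graph
# g = Graph.Famous("Zachary")
# cl = g.community_multilevel()
# print [sg.vcount() for sg in cl.subgraphs()]
# print cl.giant().vcount()          # size of the largest community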
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
   |
1 -+-+
     |
2 ---+-+        <====>   [[0, 1], [3, 4], [2, 5], [6, 7]]
       |
3 -+   |
   |   |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
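# --- Illustrative usage sketch (editorial addition, not part of the original source). ---
# Reconstructs the five-element dendrogram from the class docstring above from its merge
# matrix and prints its ASCII summary.
#
# from igraph import Dendrogram
# d = Dendrogram([(0, 1), (3, 4), (2, 5), (6, 7)])
# print d.summary(verbosity=1)
# print len(d.merges)          # four merges were performed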
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
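# --- Illustrative usage sketch (editorial addition, not part of the original source). ---
# Cutting a vertex dendrogram at its optimal level and at an explicitly requested level;
# edge betweenness community detection is just one way to obtain a VertexDendrogram.
#
# from igraph import Graph
# g = Graph.Famous("Zachary")
# dendro = g.community_edge_betweenness()
# print dendro.optimal_count
# cl = dendro.as_clustering()       # cut at the optimal level
# cl4 = dendro.as_clustering(4)     # force exactly four clusters
# print len(cl), len(cl4)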
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
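# --- Illustrative usage sketch (editorial addition, not part of the original source). ---
# A small, hand-made vertex cover with overlapping groups; vertices 2 and 3 belong to
# both groups and vertex 4 to none, which is allowed for covers but not for clusterings.
#
# from igraph import Graph, VertexCover
# g = Graph.Ring(5)
# vc = VertexCover(g, [[0, 1, 2, 3], [2, 3]])
# print vc.membership        # vertex 4 -> [] (covered by no group)
# print vc.sizes()
# print sum(vc.crossing())   # edges whose endpoints share no group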
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
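# --- Illustrative usage sketch (editorial addition, not part of the original source). ---
# Computing and inspecting the cohesive block structure of a small graph; any connected
# graph works, Zachary's karate club is used only as a familiar example.
#
# from igraph import Graph
# g = Graph.Famous("Zachary")
# blocks = g.cohesive_blocks()
# print blocks.cohesions()           # vertex connectivity of each block
# print blocks.parents()             # None marks the root block (the whole graph)
# print blocks.max_cohesions()[:5]   # best cohesion score per vertex
# tree = blocks.hierarchy()          # containment relations as a directed tree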
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
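# Illustrative usage sketch (not part of the original module; the membership
# vectors below are made up). Plain lists and Clustering instances can be
# mixed freely:
#
#   >>> a = Clustering([0, 0, 0, 1, 1, 1])
#   >>> b = [0, 0, 1, 1, 2, 2]
#   >>> compare_communities(a, b, method="vi")    # variation of information
#   >>> compare_communities(a, b, method="rand")  # Rand index
#
# Identical partitions give 0.0 for "vi" and "split-join" and 1.0 for
# "rand" and "nmi".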
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
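# Illustrative usage sketch (not part of the original module; the memberships
# are made up). The two returned values are the per-direction projection
# distances; their sum equals compare_communities(..., method="split-join"):
#
#   >>> comm1 = [0, 0, 0, 1, 1, 1]
#   >>> comm2 = [0, 0, 1, 1, 1, 1]
#   >>> d12, d21 = split_join_distance(comm1, comm2)
#   >>> d12 + d21 == compare_communities(comm1, comm2, method="split-join")
#   True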
|
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
| 159 | 172 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the index of the
cluster to which each element of the set belongs.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
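# Illustrative usage sketch (not part of the original module):
#
#   >>> cl = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2])
#   >>> cl.sizes()        # sizes of all clusters
#   [4, 3, 4]
#   >>> cl.sizes(1, 2)    # only the requested clusters
#   [3, 4]
#   >>> cl.size(0)        # single cluster; equivalent to len(cl[0])
#   4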
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
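# Illustrative usage sketch (not part of the original module; the graph and
# the "age" attribute are hypothetical):
#
#   >>> from igraph import Graph
#   >>> g = Graph.Full(5)
#   >>> g.vs["age"] = [15, 27, 41, 68, 8]
#   >>> cl = VertexClustering.FromAttribute(g, "age", intervals=(18, 65))
#
# With intervals=(18, 65) the vertices fall into three clusters: attribute
# values below 18, values from 18 up to (but not including) 65, and values
# of 65 or more. Passing a single number instead would bin the attribute by
# that width.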
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertices M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
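# Illustrative usage sketch (not part of the original module; "g" is a
# hypothetical graph with a community structure):
#
#   >>> cl = g.community_multilevel()
#   >>> q = cl.cluster_graph(combine_edges=False)
#   >>> q.vcount() == len(cl)
#   True
#
# With combine_edges=False no simplification takes place, so every original
# edge survives (intra-cluster edges become loops) and the multiplicity of
# the edges between two cluster vertices tells how strongly the clusters
# are connected.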
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
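# Illustrative usage sketch (not part of the original module; "g" is a
# hypothetical graph). The cached score has to be refreshed by hand after
# the graph is modified:
#
#   >>> cl = g.community_multilevel()
#   >>> q_before = cl.modularity
#   >>> g.delete_edges(0)              # mutate the underlying graph
#   >>> cl.recalculate_modularity()    # refresh the cached value
#   >>> q_after = cl.q                 # .q is an alias of .modularity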
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
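# Illustrative usage sketch (not part of the original module; "g" and the
# file name are hypothetical). The different forms accepted by mark_groups:
#
#   >>> from igraph import plot
#   >>> cl = g.community_multilevel()
#   >>> plot(cl, "communities.png", mark_groups=True)   # highlight all groups
#   >>> plot(cl, mark_groups=[0, 2])                    # clusters 0 and 2 get
#   ...                                                 # palette colors 0 and 1
#   >>> plot(cl, mark_groups={0: "red", (3, 4, 5): "green"})
#   ...                                                 # explicit colors; the
#   ...                                                 # tuple is a vertex group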
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from zero and M{n} being the number of
original elements. The ID of the joint group will be referenced in the
upcoming steps instead of any of its individual members. So, IDs smaller
than M{n} mean the original members of the dataset (with IDs running from
0 to M{n-1}), while IDs of M{n} and above mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
   |
1 -+-+
     |
2 ---+-+        <====>   [[0, 1], [3, 4], [2, 5], [6, 7]]
       |
3 -+   |
   |   |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
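# Illustrative usage sketch (not part of the original module): building the
# five-element dendrogram drawn in the class docstring from its merge
# matrix.
#
#   >>> d = Dendrogram([(0, 1), (3, 4), (2, 5), (6, 7)])
#   >>> d.merges
#   [(0, 1), (3, 4), (2, 5), (6, 7)]
#   >>> print d.summary(verbosity=1)   # ASCII drawing of the merge tree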
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
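# Illustrative usage sketch (not part of the original module; "g" is a
# hypothetical graph with at least four vertices):
#
#   >>> dendro = g.community_fastgreedy()
#   >>> cl = dendro.as_clustering()     # cut at the optimal cluster count
#   >>> cl4 = dendro.as_clustering(4)   # or force exactly four clusters
#   >>> len(cl4)
#   4
#
# When no optimal count hint is available, the optimal_count property below
# picks the cut that maximises modularity; as_clustering() falls back to it.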
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
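# Illustrative usage sketch (not part of the original module): with
# overlapping clusters an edge is crossing only if its endpoints share no
# cluster at all.
#
#   >>> from igraph import Graph
#   >>> g = Graph([(0, 1), (1, 2), (2, 3)])
#   >>> vc = VertexCover(g, [[0, 1, 2], [2, 3]])
#   >>> vc.crossing()
#   [False, False, False]
#
# Vertex 2 belongs to both clusters, so even edge (2, 3) is internal; an
# edge between vertices 0 and 3 would be the only crossing one here.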
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be exactly one such root block, and it covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
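# Illustrative usage sketch (not part of the original module; "g" is a
# hypothetical connected graph):
#
#   >>> blocks = g.cohesive_blocks()
#   >>> blocks.cohesions()      # one vertex connectivity value per block
#   >>> blocks.parents()        # None marks the root block (the whole graph)
#   >>> tree = blocks.hierarchy()   # containment relations as a directed tree
#   >>> blocks.max_cohesions()  # highest cohesion among the blocks containing
#   ...                         # each vertex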
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
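# Illustrative sketch (added for clarity, not part of the original source):
# the helper above normalizes every accepted form of C{mark_groups} into
# (vertex group, color) pairs. With a small clustering, the different input
# forms below all resolve to the same groups:
#
# >>> cl = Clustering([0, 0, 1, 1])
# >>> list(_handle_mark_groups_arg_for_clustering([0, 1], cl))
# [([0, 1], 0), ([2, 3], 1)]
# >>> list(_handle_mark_groups_arg_for_clustering(True, cl))
# [([0, 1], 0), ([2, 3], 1)]
# >>> list(_handle_mark_groups_arg_for_clustering({0: "red"}, cl))
# [([0, 1], 'red')]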
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, which is why it has to be
calculated in both directions and then added together. This function
computes the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
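# Hedged usage sketch (added for illustration): the two returned values are
# the individual projection distances; a zero in either position means that
# the corresponding partition is a subpartition of the other one.
#
# >>> merged = [0, 0, 0, 0, 0, 0]        # everything in a single cluster
# >>> split = [0, 0, 0, 1, 1, 1]         # a strict refinement of the above
# >>> split_join_distance(merged, split)
# (3, 0)
# >>> sum(split_join_distance(merged, split))   # the split-join distance itself
# 3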
|
summary
|
Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index to which each element of the set belongs.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self._graph, self)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
# MASKED: summary function (lines 182-207)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specifies the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
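# Hedged sketch (added for illustration; the "age" vertex attribute below is
# made up): a single number is interpreted as a bin width, while a list of
# numbers gives explicit bin boundaries.
#
# >>> g = Graph.Full(4)
# >>> g.vs["age"] = [15, 27, 38, 71]
# >>> len(VertexClustering.FromAttribute(g, "age", intervals=10))
# 4
# >>> len(VertexClustering.FromAttribute(g, "age", intervals=[18, 65]))
# 3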
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
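# Hedged sketch (added for illustration): contracting a two-cluster
# clustering of a 4-cycle. With the default arguments, parallel edges and
# loops are merged away; with combine_edges=False they are all kept.
#
# >>> g = Graph.Ring(4)                      # cycle 0-1-2-3-0
# >>> cl = VertexClustering(g, [0, 0, 1, 1])
# >>> cg = cl.cluster_graph()
# >>> cg.vcount(), cg.ecount()
# (2, 1)
# >>> cl.cluster_graph(combine_edges=False).ecount()
# 4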
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
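# Hedged usage sketch (added for illustration): the cached modularity refers
# to the graph as it was when the score was last computed, so the value has
# to be recalculated explicitly after the graph is modified.
#
# >>> g = Graph.Full(3) + Graph.Full(3)      # two disjoint triangles
# >>> cl = VertexClustering(g, [0, 0, 0, 1, 1, 1])
# >>> round(cl.modularity, 2)
# 0.5
# >>> g.add_edges([(0, 3)])                  # connect the two triangles
# >>> round(cl.recalculate_modularity(), 2)
# 0.36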
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities; this method will return
a copy of an arbitrary one of them.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
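# Illustrative sketch of the merge matrix convention described in the class
# docstring (added for clarity, not part of the original source): with five
# leaves, row M{i} of the matrix (counting from zero) creates group M{5+i}.
#
# >>> d = Dendrogram([(0, 1), (3, 4), (2, 5), (6, 7)])
# >>> d.format()        # group 5 = {0,1}, 6 = {3,4}, 7 = {2} + group 5
# '((3,4)6,(2,(0,1)5)7)8;'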
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
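# Hedged sketch (added for illustration; relies on the standard Zachary
# karate club example and on L{Graph.community_fastgreedy()}): cutting the
# dendrogram at an explicit level versus at the optimal_count heuristic.
#
# >>> g = Graph.Famous("Zachary")
# >>> dendro = g.community_fastgreedy()
# >>> len(dendro.as_clustering(2))
# 2
# >>> len(dendro.as_clustering())       # cuts at dendro.optimal_count levels
# 3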
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
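# Hedged usage sketch (added for illustration): exactly one block -- the one
# covering the whole graph -- has no parent, and hierarchy() returns a tree
# with one vertex per block.
#
# >>> g = Graph.Famous("Zachary")
# >>> blocks = g.cohesive_blocks()
# >>> blocks.parents().count(None)
# 1
# >>> blocks.hierarchy().vcount() == len(blocks)
# True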
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
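# Illustrative sketch of compare_communities() on two hand-made membership
# lists; the numeric results are produced by the compiled igraph._igraph
# extension, so a working igraph installation is assumed. The two lists below
# encode the same partition with different labels, hence the variation of
# information is zero and the Rand index is one:
#
#     >>> a = [0, 0, 0, 1, 1, 1]
#     >>> b = [1, 1, 1, 0, 0, 0]
#     >>> compare_communities(a, b, method="vi")
#     0.0
#     >>> compare_communities(a, b, method="rand")
#     1.0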
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
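# Illustrative sketch of split_join_distance() with hand-made membership
# lists, assuming the compiled igraph._igraph extension. Identical partitions
# are at distance zero in both directions; when one partition refines the
# other, one of the two projection distances is zero and the total split-join
# distance is the sum of the pair:
#
#     >>> split_join_distance([0, 1, 2, 3], [0, 1, 2, 3])
#     (0, 0)
#     >>> sum(split_join_distance([0, 0, 1, 1], [0, 0, 0, 0]))
#     2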
|
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
| 182 | 207 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}.
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(idxi)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
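# Illustrative sketch of the merge-matrix representation documented in the
# Dendrogram class docstring, using a made-up merge history of four leaves
# (IDs 0-3) and three merge nodes (IDs 4-6):
#
#     >>> d = Dendrogram([(0, 1), (2, 3), (4, 5)])
#     >>> d.merges
#     [(0, 1), (2, 3), (4, 5)]
#     >>> d.format()
#     '((0,1)4,(2,3)5)6;'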
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
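# Illustrative sketch of cutting a VertexDendrogram produced by a hierarchical
# community detection method; it assumes the compiled igraph package with
# Graph.Famous and community_fastgreedy available:
#
#     >>> from igraph import Graph
#     >>> g = Graph.Famous("Zachary")
#     >>> dendrogram = g.community_fastgreedy()
#     >>> cl = dendrogram.as_clustering()           # cut at the optimal level
#     >>> len(cl) == dendrogram.optimal_count
#     True
#     >>> len(dendrogram.as_clustering(2))          # force exactly two clusters
#     2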
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}.
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
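# Illustrative sketch of working with a CohesiveBlocks instance; it assumes
# the compiled igraph package so that Graph.cohesive_blocks() is available,
# and the concrete blocks and cohesion values depend on the input graph:
#
#     >>> from igraph import Graph
#     >>> g = Graph.Famous("Zachary")
#     >>> blocks = g.cohesive_blocks()
#     >>> len(blocks.cohesions()) == len(blocks)
#     True
#     >>> blocks.hierarchy().is_dag()   # the parent links form a directed tree
#     True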
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
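# Illustrative sketch of the mark_groups forms resolved by the helper above
# when plotting a clustering. It assumes the compiled igraph package together
# with its Cairo-based plotting support; the file name, colors and cluster
# indices are made-up examples:
#
#     >>> from igraph import Graph, plot
#     >>> g = Graph.Famous("Zachary")
#     >>> cl = g.community_multilevel()
#     >>> p = plot(cl, "marked.png", mark_groups=True)            # every cluster
#     >>> p = plot(cl, "marked.png", mark_groups=[0, 2])          # clusters 0 and 2
#     >>> p = plot(cl, "marked.png", mark_groups={0: "red", 1: "green"})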
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
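# --- Editorial example, not part of the original igraph source ---
# Hand-worked sketch of the projection distance described above, for the
# partitions A = {{0,1,2,3}, {4,5}} and B = {{0,1}, {2,3}, {4,5}} on n = 6
# elements. Each set of A overlaps a best-matching set of B by 2 elements,
# so the projection distance of A from B is 6 - (2 + 2) = 2; every set of B
# is fully contained in a set of A, so the distance of B from A is 6 - 6 = 0,
# and the split-join distance is their sum, 2.
def _example_split_join_distance():
    a = [0, 0, 0, 0, 1, 1]
    b = [0, 0, 1, 1, 2, 2]
    # Expected to return the two projection distances, (2, 0), per the
    # reasoning above.
    return split_join_distance(a, b)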
|
__init__
|
Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weights} key with the appropriate value here.
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
# MASKED: __init__ function (lines 234-263)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
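# --- Editorial sketch, not part of the original igraph source ---
# "dept" and "age" below are hypothetical vertex attributes, used only to
# illustrate the forms of the intervals argument documented above:
#   by_dept = VertexClustering.FromAttribute(g, "dept")                    # one cluster per distinct value
#   by_age  = VertexClustering.FromAttribute(g, "age", intervals=10)       # bins of width 10
#   by_band = VertexClustering.FromAttribute(g, "age", intervals=[18, 65]) # <18, [18, 65), >=65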
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
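# --- Editorial sketch, not part of the original igraph source ---
# Collapsing a clustering into a "community graph"; "name" and "weight" are
# hypothetical attribute names shown only to illustrate the combination specs:
#   cg = clustering.cluster_graph(combine_vertices={"name": "first"},
#                                 combine_edges={"weight": "sum"})
#   # With combine_edges=False the parallel edges between the contracted
#   # clusters are kept instead of being merged into one.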
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}.
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
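# --- Editorial sketch, not part of the original igraph source ---
# The mark_groups forms documented above, written as plot() calls on a
# hypothetical clustering "cl":
#   plot(cl, mark_groups=True)                        # highlight every cluster
#   plot(cl, mark_groups=[0, 2])                      # clusters 0 and 2 only
#   plot(cl, mark_groups={0: "red", (3, 4): "blue"})  # explicit colors; (3, 4) is a vertex group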
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weights} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
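# --- Editorial sketch, not part of the original igraph source ---
# Cutting a dendrogram produced by a hierarchical community detection method;
# community_fastgreedy() is used only as an example of something that returns
# a VertexDendrogram:
#   dendro = g.community_fastgreedy()
#   cl_opt = dendro.as_clustering()     # cut at the optimal_count level
#   cl_four = dendro.as_clustering(4)   # replay merges until four clusters remain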
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}.
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
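# --- Editorial example, not part of the original igraph source ---
# Small sketch of the remove_none behaviour documented above: the None at
# index 2 of the first list removes that position from *both* lists before
# they are compared.
def _example_prepare_community_comparison():
    vec1, vec2 = _prepare_community_comparison(
        [0, 0, None, 1], [0, 1, 1, 1], remove_none=True
    )
    # Three positions survive; the surviving entries may be reordered by the
    # swap-and-truncate loop above, but vec1 and vec2 stay aligned.
    return vec1, vec2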
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric; this is why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
|
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weights} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
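# --- Editorial sketch, not part of the original igraph source ---
# Typical construction following the parameters documented above; "g" is a
# hypothetical Graph with five vertices:
#   vc = VertexClustering(g, [0, 0, 1, 1, 1])   # explicit membership list
#   vc = VertexClustering(g)                    # every vertex in one cluster
#   vc.modularity                               # computed lazily on first access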
| 234 | 263 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index to which each element of the set belongs.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self._graph, self)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specifies the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score, or C{None} if the modularity
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
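# Infer the number of original items. Assuming the merge list describes a
# complete dendrogram, the last row joins the two remaining groups, and the
# larger of its two IDs is (nitems + nmerges - 2); solving for nitems gives
# the formula below.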
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
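# Depth-first walk over the merge tree, starting from the topmost (most
# recent) merge nodes. Leaf indices (smaller than self._nitems) are collected
# as they are reached, so every leaf appears exactly once and leaves that
# belong to the same subtree end up next to each other.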
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
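# Build the Newick string bottom-up: the entry of each merge node is replaced
# by the subtree string "(left,right)label" and the entries of its two
# children are cleared. After the last merge, the final entry holds the whole
# tree; the trailing semicolon completes the Newick representation.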
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
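# Draw the merges level by level: the inner loop consumes every merge whose
# both operands already exist (IDs below max_community_idx), draws a
# `---' bracket between their columns and records the midpoint as the column
# of the newly created group. Merges referencing a group created in the
# current level are postponed to the next level.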
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
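# Lay out the tree in abstract coordinates first: the leaves are spread along
# one axis (spacing determined by their label boxes) and every merge node is
# placed at the midpoint of its two children, one unit further along the
# other axis. The layout is then mirrored if needed and rescaled to fit the
# bounding box.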
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
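# Replaying (num_elts - n) merges leaves exactly n groups.
# community_to_membership yields the raw group IDs; UniqueIdGenerator
# renumbers them into consecutive integers starting from zero.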
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
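# Try every possible cut of the dendrogram: after `step` merges there are
# (n - step) clusters left; keep the cut that yields the highest modularity.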
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
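# Derive the number of covered elements from the largest ID found in any
# non-empty cluster; max() raises ValueError when every cluster is empty (or
# there are no clusters at all), in which case we fall back to zero and rely
# on the explicit n argument below.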
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be exactly one block without a parent (the root block), and it
should cover the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
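# When group marking is requested (via the configuration or an explicit
# mark_groups=True), only blocks whose cohesion is greater than one receive a
# colored blob, and the cohesion value itself is used as the palette color
# index, so blocks of equal cohesion share a color.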
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workship, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric; this is why it has to be
calculated in both directions and then added together. This function
calculates the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
|
recalculate_modularity
|
Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index to which each element of the set belongs.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
        return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
          specifies the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
# MASKED: recalculate_modularity function (lines 374-387)
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
        The giant community is a community for which no larger community
        exists in the clustering.
        @note: there can be multiple giant communities; in that case, this
          method returns a copy of an arbitrary one of them.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
            (see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
        will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
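# Illustrative sketch (not part of the original module): obtaining a
# VertexDendrogram from a hierarchical community detection method and
# flattening it into a VertexClustering. Graph.community_fastgreedy() is
# assumed to be available on the associated Graph class.
def _example_vertex_dendrogram():
    from igraph import Graph
    graph = Graph.Famous("Zachary")
    dendrogram = graph.community_fastgreedy()
    # Cut at the modularity-maximizing level suggested by optimal_count...
    best = dendrogram.as_clustering()
    assert len(best) == dendrogram.optimal_count
    # ...or request an explicit number of clusters instead.
    four = dendrogram.as_clustering(n=4)
    assert len(four) == 4
    return best, four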
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
            (see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
          C{None} mean that there is no parent block for that block. There
          should be exactly one block with no parent; it is the root block
          and it covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
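# Illustrative sketch (not part of the original module): inspecting the
# cohesive block structure of a graph. Graph.cohesive_blocks() is assumed
# to be available on the associated Graph class.
def _example_cohesive_blocks():
    from igraph import Graph
    graph = Graph.Famous("Zachary")
    blocks = graph.cohesive_blocks()
    # The root block has no parent and covers the whole graph.
    root = blocks.parents().index(None)
    assert len(blocks[root]) == graph.vcount()
    # Per-block cohesion values and, for each vertex, the maximum cohesion
    # over all blocks containing it.
    cohesions = blocks.cohesions()
    per_vertex = blocks.max_cohesions()
    assert len(per_vertex) == graph.vcount()
    # The containment hierarchy of the blocks as a directed tree.
    tree = blocks.hierarchy()
    assert tree.vcount() == len(blocks)
    return cohesions, per_vertex, tree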
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
    # tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
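# Illustrative sketch (not part of the original module): what the helper
# above does when remove_none is requested. Elements whose membership is
# unknown in either vector are dropped from both vectors, so the pairing
# between the two vectors (which is all the comparison measures need) is
# preserved, even though the order of the surviving elements may change.
def _example_prepare_community_comparison():
    comm1 = [0, None, 0, 1, 1]
    comm2 = [0, 1, 1, 1, 2]
    vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none=True)
    assert len(vec1) == len(vec2) == 4
    assert sorted(zip(vec1, vec2)) == [(0, 0), (0, 1), (1, 1), (1, 2)]
    return vec1, vec2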
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
      and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
    @ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
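# Illustrative sketch (not part of the original module): comparing two flat
# partitions of the same element set with a few of the supported measures.
def _example_compare_communities():
    comm1 = [0, 0, 0, 1, 1, 1]
    comm2 = [0, 0, 1, 1, 2, 2]
    vi = compare_communities(comm1, comm2, method="vi")
    nmi = compare_communities(comm1, comm2, method="nmi")
    rand = compare_communities(comm1, comm2, method="rand")
    # Identical partitions match perfectly even if their labels differ.
    assert compare_communities([0, 0, 1, 1], [5, 5, 3, 3],
                               method="split-join") == 0
    return vi, nmi, rand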
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
    distance of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
    @ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
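# Illustrative sketch (not part of the original module): the pair of
# projection distances returned by split_join_distance(). A zero in either
# position means that one partition is a subpartition of the other.
def _example_split_join_distance():
    coarse = [0, 0, 0, 0, 1, 1]
    fine = [0, 0, 1, 1, 2, 2]    # refines every cluster of "coarse"
    d1, d2 = split_join_distance(coarse, fine)
    # One projection distance is zero because "fine" is a subpartition of
    # "coarse"; the sum of the two is the split-join distance proper.
    assert 0 in (d1, d2)
    assert d1 + d2 == compare_communities(coarse, fine, method="split-join")
    return d1, d2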
|
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
| 374 | 387 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self._graph, self)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specifies the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
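A minimal illustrative sketch (the toy graph and the C{age}
attribute below are made-up example data, not taken from a real
dataset):
>>> from igraph import Graph
>>> g = Graph.Full(4)
>>> g.vs["age"] = [5, 15, 25, 35]
>>> cl = VertexClustering.FromAttribute(g, "age", intervals=[10, 20, 30])
>>> cl.membership
[0, 1, 2, 3]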
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
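A small illustrative sketch on a made-up complete graph; with
C{combine_edges=False} the intra-cluster edges survive as loops, so
all six original edges are kept:
>>> from igraph import Graph
>>> cl = VertexClustering(Graph.Full(4), [0, 0, 1, 1])
>>> cg = cl.cluster_graph(combine_edges=False)
>>> cg.vcount(), cg.ecount()
(2, 6)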
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities; in that case, this
method returns a copy of an arbitrary one of them.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
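An illustrative sketch on a made-up toy clustering:
>>> from igraph import Graph
>>> cl = VertexClustering(Graph.Full(5), [0, 0, 0, 1, 1])
>>> cl.giant().vcount()
3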
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}.
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
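The same merge matrix, fed to the constructor (an illustrative
sketch; see also the C{format} method below):
>>> d = Dendrogram([[0, 1], [3, 4], [2, 5], [6, 7]])
>>> d.format()
'((3,4)6,(2,(0,1)5)7)8;'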
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
# The merged group created in this step is stored at position idxi,
# so later references to its ID must resolve to that position.
idxs.append(idxi)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leaves and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leaves and branches.
@param max_leaf_count: the maximal number of leaves to print in the
ASCII representation. If the dendrogram has more leaves than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
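A minimal sketch with a hand-written merge matrix (illustrative
only, not taken from a real community detection run):
>>> from igraph import Graph
>>> vd = VertexDendrogram(Graph.Full(3), [(0, 1), (2, 3)])
>>> vd.as_clustering(2).membership
[0, 0, 1]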
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
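For illustration (a made-up cover over five elements, two of which
belong to no cluster at all):
>>> c = Cover([[0, 1], [1, 2]], n=5)
>>> c.n
5
>>> c.membership
[[0], [0, 1], [1], [], []]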
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}.
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. Exactly
one block (the root block, which covers the entire graph) should
have no parent.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
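A small illustrative sketch with hypothetical input values:
>>> cl = Clustering([0, 0, 1, 1])
>>> list(_handle_mark_groups_arg_for_clustering({0: "red"}, cl))
[([0, 1], 'red')]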
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
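An illustrative sketch with made-up membership lists:
>>> _prepare_community_comparison([0, 1, None], [1, 0, 2], remove_none=True)
([0, 1], [1, 0])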
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workship, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
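A quick illustrative check with made-up membership lists (the two
partitions are identical up to relabeling, so their split-join
distance is zero):
>>> compare_communities([0, 0, 1, 1], [1, 1, 0, 0], method="split-join") == 0
True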
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
distance of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric; that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
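An illustrative sketch with made-up membership lists (the second
partition is a refinement of the first, so one of the two
distances is zero):
>>> sum(split_join_distance([0, 0, 1, 1], [0, 0, 1, 2])) == 1
True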
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
|
giant
|
Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities; in that case, this
method returns a copy of an arbitrary one of them.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index to which each element of the set belongs.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
# A plain Clustering has no associated graph, so build the cover
# directly from the clusters, keeping the same number of elements.
return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specifies the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertices M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
# MASKED: giant function (lines 424-437)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leaves and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leaves and branches.
@param max_leaf_count: the maximal number of leaves to print in the
ASCII representation. If the dendrogram has more leaves than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
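# --- Illustrative usage sketch; not part of the original igraph source. ---
# Rebuilds the five-leaf dendrogram from the class docstring above and shows
# how merge step i creates the new group with ID n+i (here n = 5).
def _example_dendrogram_merges():
    d = Dendrogram([[0, 1], [3, 4], [2, 5], [6, 7]])
    # Step 0 joins leaves 0 and 1 into group 5, step 1 joins 3 and 4 into
    # group 6, step 2 joins leaf 2 with group 5 into 7, step 3 joins 6 and 7.
    assert d.merges == [(0, 1), (3, 4), (2, 5), (6, 7)]
    return d.format("newick")   # should give '((3,4)6,(2,(0,1)5)7)8;'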
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
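# --- Illustrative usage sketch; not part of the original igraph source. ---
# Typical way a VertexDendrogram is obtained and cut into a flat clustering;
# assumes Graph.Famous("Zachary") and community_fastgreedy() are available.
def _example_vertex_dendrogram_cut():
    from igraph import Graph
    g = Graph.Famous("Zachary")
    dendro = g.community_fastgreedy()   # hierarchical merges -> VertexDendrogram
    flat = dendro.as_clustering()       # cut at the optimal_count level
    three = dendro.as_clustering(3)     # or request exactly three clusters
    return dendro.optimal_count, len(flat), len(three)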
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
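# --- Illustrative usage sketch; not part of the original igraph source. ---
# Typical CohesiveBlocks workflow: compute the blocks of a graph, then look
# at their cohesion values and the containment hierarchy between the blocks.
def _example_cohesive_blocks():
    from igraph import Graph
    g = Graph.Famous("Zachary")
    blocks = g.cohesive_blocks()    # returns a CohesiveBlocks instance
    tree = blocks.hierarchy()       # directed tree: parent block -> child block
    return blocks.cohesions(), blocks.max_cohesions(), tree.vcount()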
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workship, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric; that is why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
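# --- Illustrative usage sketch; not part of the original igraph source. ---
# Two membership vectors describing the same partition under swapped labels:
# the variation of information is (numerically) zero, the normalized mutual
# information is one, and both split-join projection distances are zero.
def _example_compare_communities():
    a = [0, 0, 0, 1, 1, 1]
    b = [1, 1, 1, 0, 0, 0]
    assert abs(compare_communities(a, b, method="vi")) < 1e-9
    assert abs(compare_communities(a, b, method="nmi") - 1.0) < 1e-9
    return split_join_distance(a, b)    # expected to be (0, 0)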
|
def giant(self):
"""Returns the giant community of the clustered graph.
The giant component a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
| 424 | 437 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self._graph, self)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specifies the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertices M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
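# Usage sketch for cluster_graph() (illustrative; community_multilevel() is
# just one of several methods that return a VertexClustering):
#
#     from igraph import Graph
#     g = Graph.Famous("Zachary")
#     cl = g.community_multilevel()
#     cg = cl.cluster_graph(combine_edges=False)
#     # cg has one vertex per community; with combine_edges=False, parallel
#     # edges count the original edges running between two communities
#     print cg.vcount(), cg.ecount()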
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
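# Usage sketch (illustrative; assumes an existing Graph g): the cached
# modularity must be refreshed by hand after the graph is modified.
#
#     cl = g.community_multilevel()
#     q_before = cl.modularity
#     g.add_edges([(0, 5)])              # graph changed after clustering
#     q_after = cl.recalculate_modularity()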
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant component is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
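# Usage sketch for giant() (illustrative; assumes an existing Graph g):
#
#     cl = g.community_multilevel()
#     big = cl.giant()                   # Graph copy of a largest community
#     print big.vcount(), max(cl.sizes())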
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
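# Usage sketch for plotting a clustering (illustrative; requires the optional
# pycairo-based plotting support):
#
#     from igraph import plot
#     cl = g.community_multilevel()          # assumes an existing Graph g
#     plot(cl, mark_groups=True)             # highlight every cluster
#     plot(cl, mark_groups={0: "red", 2: "green"})   # only clusters 0 and 2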
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
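# Worked example for _traverse_inorder() (comment only): for the merges
# [(0, 1), (3, 4), (2, 5), (6, 7)] over five leaves -- the same example as in
# the class docstring -- the stack-based traversal starts at the root (node 8)
# and collects the leaves in the order [1, 0, 2, 4, 3], an ordering in which
# the dendrogram can be drawn without crossing branches.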
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leaves and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leaves and branches.
@param max_leaf_count: the maximal number of leaves to print in the
ASCII representation. If the dendrogram has more leaves than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
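# Usage sketch for plotting a dendrogram (illustrative; requires the optional
# pycairo-based plotting support):
#
#     from igraph import Dendrogram, plot
#     d = Dendrogram([(2, 3), (0, 1), (4, 5)])
#     plot(d)                            # default left-right orientation
#     plot(d, orientation="top-bottom")  # items on top, merges downwards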
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
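# Usage sketch for as_clustering() (illustrative; assumes an existing Graph g):
#
#     dendro = g.community_fastgreedy()
#     cl3 = dendro.as_clustering(3)      # cut into exactly three clusters
#     cl = dendro.as_clustering()        # cut at the optimal_count level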
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
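# Usage sketch for optimal_count (illustrative; assumes an existing Graph g):
#
#     dendro = g.community_edge_betweenness()
#     print dendro.optimal_count         # modularity-maximizing suggestion
#     dendro.optimal_count = 4           # override the hint by hand
#     cl = dendro.as_clustering()        # now cuts into four clusters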
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
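# Usage sketch for VertexCover (illustrative; the vertex groups below are
# arbitrary and may overlap):
#
#     from igraph import Graph, VertexCover
#     g = Graph.Famous("Zachary")
#     vc = VertexCover(g, [[0, 1, 2, 3], [2, 3, 4, 5], [30, 31, 32, 33]])
#     print vc.membership[2]             # vertex 2 is in clusters 0 and 1
#     print vc.sizes()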
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be exactly one root block (a block with no parent), and it
covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
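# Usage sketch for CohesiveBlocks (illustrative; assumes an existing connected
# Graph g):
#
#     blocks = g.cohesive_blocks()
#     for block, cohesion in zip(blocks, blocks.cohesions()):
#         print cohesion, block
#     tree = blocks.hierarchy()          # block containment as a directed tree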
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
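# Illustration of the mark_groups forms resolved above (comment only; plot()
# requires the optional pycairo-based plotting support):
#
#     plot(cl, mark_groups=True)                    # every cluster, palette colors
#     plot(cl, mark_groups=[0, 2])                  # clusters 0 and 2 only
#     plot(cl, mark_groups={0: "red", 2: "blue"})   # explicit colors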
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
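# Usage sketch for compare_communities() (illustrative; the membership vectors
# below are arbitrary):
#
#     a = [0, 0, 0, 1, 1, 1]
#     b = [0, 0, 1, 1, 2, 2]
#     print compare_communities(a, b, method="vi")
#     print compare_communities(a, b, method="nmi")
#     print compare_communities(a, b, method="rand")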
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric; this is why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
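# Usage sketch for split_join_distance() (illustrative; the membership vectors
# below are arbitrary):
#
#     a = [0, 0, 0, 1, 1, 1]
#     b = [0, 0, 1, 1, 2, 2]
#     d12, d21 = split_join_distance(a, b)
#     print d12, d21, d12 + d21          # the sum is the split-join distance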
|
_traverse_inorder
|
Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list.
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index to which each element of the set belongs.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
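# Usage sketch for compare_to() (illustrative):
#
#     cl1 = Clustering([0, 0, 1, 1])
#     cl2 = Clustering([0, 1, 1, 1])
#     print cl1.compare_to(cl2, method="vi")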
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specifies the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant component is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
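    # A quick sanity check (illustrative; the graph and method are arbitrary):
    # the giant community is simply the largest cluster, so its vertex count
    # equals the maximum of sizes().
    #
    #     >>> from igraph import Graph
    #     >>> cl = Graph.Famous("Zachary").community_multilevel()
    #     >>> cl.giant().vcount() == max(cl.sizes())
    #     True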
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
            (see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
            idxs.append(idxi)  # the newly created group now lives at index idxi
return [x for x in tuple_repr if x is not None]
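    # A small sanity check (illustrative; the expected output assumes the idxi
    # bookkeeping used above): the docstring's merge matrix collapses into a
    # single nested tuple.
    #
    #     >>> Dendrogram._convert_matrix_to_tuple_repr([(0, 1), (3, 4), (2, 5), (6, 7)])
    #     [((3, 4), (2, (0, 1)))]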
# MASKED: _traverse_inorder function (lines 594-621)
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
        will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
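    # A minimal usage sketch (illustrative; community_fastgreedy() and the
    # Zachary karate club graph are arbitrary example inputs):
    #
    #     >>> from igraph import Graph
    #     >>> dendro = Graph.Famous("Zachary").community_fastgreedy()
    #     >>> cl = dendro.as_clustering(3)
    #     >>> len(cl)
    #     3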
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
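    # A sketch of the fallback behaviour (illustrative; graph and method are
    # arbitrary): with no hint supplied, the property scans every cut for the
    # highest modularity, and as_clustering() with no argument uses that cut.
    #
    #     >>> from igraph import Graph
    #     >>> dendro = Graph.Famous("Zachary").community_fastgreedy()
    #     >>> cl = dendro.as_clustering()
    #     >>> len(cl) == dendro.optimal_count
    #     True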
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
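    # A small sketch of the n parameter (illustrative): it only matters when
    # some elements of the set are covered by no cluster at all, as elements
    # 4 and 5 are here.
    #
    #     >>> c = Cover([[0, 1, 2], [2, 3]], n=6)
    #     >>> c.n
    #     6
    #     >>> c.membership
    #     [[0], [0], [0, 1], [1], [], []]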
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
            (see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
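    # A minimal usage sketch (illustrative; the Zachary karate club graph is an
    # arbitrary example input): the hierarchy is a directed tree with one vertex
    # per block, rooted at the block covering the whole graph.
    #
    #     >>> from igraph import Graph
    #     >>> blocks = Graph.Famous("Zachary").cohesive_blocks()
    #     >>> tree = blocks.hierarchy()
    #     >>> tree.is_dag()
    #     True
    #     >>> tree.vcount() == len(blocks)
    #     True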
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
    # tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
          and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
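# A small sketch of the label-invariance of these measures (illustrative): two
# partitions that differ only in how the clusters are labelled have a variation
# of information of 0 and an NMI of 1.
#
#     >>> compare_communities([0, 0, 1, 1], [1, 1, 0, 0], method="vi")
#     0.0
#     >>> compare_communities([0, 0, 1, 1], [1, 1, 0, 0], method="nmi")
#     1.0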
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
    one partition from the other and vice versa, where the projection
    distance of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
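# A worked example of the definition above (illustrative): for the partitions
# {0,1,2}|{3,4} and {0,1}|{2,3,4}, each direction loses exactly one element in
# the best-overlap matching, so both projection distances are 1 and the
# split-join distance is their sum, 2.
#
#     >>> split_join_distance([0, 0, 0, 1, 1], [0, 0, 1, 1, 1])
#     (1, 1)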
|
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
| 594 | 621 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
        return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
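    # A minimal sketch of binning a numeric attribute with explicit interval
    # boundaries (illustrative; the attribute name "score" is made up for the
    # example):
    #
    #     >>> from igraph import Graph, VertexClustering
    #     >>> g = Graph.Full(4)
    #     >>> g.vs["score"] = [1, 12, 23, 35]
    #     >>> cl = VertexClustering.FromAttribute(g, "score", intervals=[10, 20, 30])
    #     >>> cl.membership
    #     [0, 1, 2, 3]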
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
        clustering. Vertices M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
        The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
            (see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
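# Illustrative sketch: the five-leaf dendrogram drawn in the class
# docstring corresponds to the following merge matrix (leaves 0-4,
# merge nodes 5-8):
#
#     >>> d = Dendrogram([(0, 1), (3, 4), (2, 5), (6, 7)])
#     >>> d._nitems, d._nmerges
#     (5, 4)
#     >>> d.format()
#     '((3,4)6,(2,(0,1)5)7)8;'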
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; in that case, the optimal count
will be selected based on the modularity.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
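# Usage sketch (assuming a connected Graph instance g, e.g. created with
# Graph.Famous("Zachary")): community_fastgreedy() returns a
# VertexDendrogram that can be cut at an explicit level; with no argument,
# as_clustering() cuts at optimal_count instead.
#
#     >>> dendro = g.community_fastgreedy()
#     >>> clusters = dendro.as_clustering(3)   # request exactly 3 clusters
#     >>> len(clusters)
#     3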
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
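# Illustrative sketch of the n parameter: elements covered by no cluster
# only show up in the membership vector when n is given explicitly.
#
#     >>> c = Cover([[0, 1], [2]], n=5)
#     >>> c.n
#     5
#     >>> c.membership
#     [[0], [0], [1], [], []]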
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
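# Illustrative sketch: with parent indices [None, 0, 0, 1] (block 0 being
# the root block that covers the whole graph), the hierarchy graph has the
# directed edges [(0, 1), (0, 2), (1, 3)], i.e. each edge points from a
# block to one of the blocks nested inside it.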
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
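# Illustrative sketch: passing mark_groups=[0, 2] to the __plot__ method
# of a clustering is resolved here into the (group, color) pairs
# (clustering[0], 0) and (clustering[2], 1), i.e. cluster 0 is drawn with
# palette color 0 and cluster 2 with palette color 1.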
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
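# Illustrative sketch of remove_none: the two vectors stay paired
# element by element, although the surviving entries may be reordered.
#
#     >>> _prepare_community_comparison([0, None, 1], [0, 1, 1],
#     ...                               remove_none=True)
#     ([0, 1], [0, 1])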
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
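# Usage sketch: identical partitions are at distance zero under the
# variation of information metric and at similarity one under the Rand
# index.
#
#     >>> compare_communities([0, 0, 1, 1], [0, 0, 1, 1], method="vi")
#     0.0
#     >>> compare_communities([0, 0, 1, 1], [0, 0, 1, 1], method="rand")
#     1.0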
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
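# Worked sketch of the projection distances defined above: for
# comm1 = [0, 0, 0, 0] and comm2 = [0, 0, 1, 1], the single set of comm1
# overlaps a set of comm2 in at most 2 elements, giving 4 - 2 = 2, while
# both sets of comm2 are fully contained in the set of comm1, giving
# 4 - (2 + 2) = 0. Following the return order documented above, the
# expected result is (2, 0); the zero component reflects that comm2 is a
# subpartition of comm1, and the split-join distance itself is 2 + 0 = 2.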
|
format
|
Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
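# Illustrative sketch, using the clustering from the class docstring:
#
#     >>> cl = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2])
#     >>> cl.sizes()
#     [4, 3, 4]
#     >>> cl.sizes(0, 2)
#     [4, 4]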
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
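# Illustrative sketch of the intervals parameter (assuming the graph's
# vertices carry a hypothetical numeric "age" attribute with the values
# 5, 12, 25 and 33): intervals=[10, 20, 30] bins these values with
# bisect() into the categories 0, 1, 2 and 3, so the resulting membership
# vector is [0, 1, 2, 3]; intervals=10 yields the same membership here
# via int(value / 10).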
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
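# Usage sketch (assuming cl is a VertexClustering of some graph): with
# combine_edges=False every edge of the original graph is preserved:
# intra-cluster edges become loops and inter-cluster edges become
# parallel edges between the corresponding cluster vertices.
#
#     >>> cg = cl.cluster_graph(combine_edges=False)
#     >>> cg.vcount() == len(cl)
#     True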
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities; in that case this method
will return a copy of an arbitrary one of them.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
# MASKED: format function (lines 626-652)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
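# Hedged usage sketch: plotting normally goes through igraph.plot(), which ends
# up calling the __plot__ method above. The Cairo backend is assumed to be
# installed and the file name below is invented for illustration only.
#
#   >>> from igraph import plot
#   >>> d = Dendrogram([(0, 1), (3, 4), (2, 5), (6, 7)])
#   >>> plot(d, "dendrogram.png", orientation="top-bottom")  # needs pycairo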
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
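# Hedged usage sketch (the Zachary graph and the fast greedy community
# detection method come from the wider igraph API, not from this file):
#
#   >>> from igraph import Graph
#   >>> dendro = Graph.Famous("Zachary").community_fastgreedy()
#   >>> clusters = dendro.as_clustering()     # cut at the optimal level
#   >>> three = dendro.as_clustering(3)       # force exactly three clusters
#   >>> len(three)
#   3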
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
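# Illustrative sketch reusing the cover from the class docstring:
#
#   >>> cl = Cover([[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]])
#   >>> cl.sizes()
#   [4, 3, 3]
#   >>> cl.size(1)
#   3
#   >>> cl.n          # element 5 belongs to no cluster but is still counted
#   7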
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
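# Hedged sketch (a tiny path graph is built inline just for illustration):
#
#   >>> from igraph import Graph
#   >>> g = Graph([(0, 1), (1, 2), (2, 3)])
#   >>> vc = VertexCover(g, [[0, 1], [2, 3]])
#   >>> vc.crossing()       # only the middle edge runs between the two groups
#   [False, True, False]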
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
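# Hedged usage sketch (Graph.cohesive_blocks() does the actual computation, as
# noted in the class docstring; the Zachary graph is just an example input):
#
#   >>> from igraph import Graph
#   >>> blocks = Graph.Famous("Zachary").cohesive_blocks()
#   >>> tree = blocks.hierarchy()   # edges point from parent block to child block
#   >>> roots = [i for i, p in enumerate(blocks.parents()) if p is None]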
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
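# Illustrative sketch of the resolution rules implemented above: a plain list of
# cluster indices is expanded into (member list, color index) pairs.
#
#   >>> cl = Clustering([0, 0, 1, 1, 2, 2])
#   >>> list(_handle_mark_groups_arg_for_clustering([1, 2], cl))
#   [([2, 3], 0), ([4, 5], 1)]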
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
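# Illustrative sketch: with remove_none=True, positions where either vector is
# None are dropped from both vectors before the comparison.
#
#   >>> _prepare_community_comparison([0, 0, 1, None], [0, 1, 1, 1], remove_none=True)
#   ([0, 0, 1], [0, 1, 1])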
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
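# Hedged usage sketch (only trivially known values are shown, since the exact
# float formatting comes from the C core): identical partitions have zero
# variation of information, and relabelling the clusters does not matter.
#
#   >>> compare_communities([0, 0, 1, 1], [1, 1, 0, 0], method="vi")
#   0.0
#   >>> compare_communities([0, 0, 1, 1], [1, 1, 0, 0], method="rand")
#   1.0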
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
distance of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
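# Hand-worked illustration of the three steps above (not from the source): for
# comm1 = [0, 0, 0, 1, 1] and comm2 = [0, 0, 1, 1, 1] the maximal overlaps of
# the comm1 clusters {0,1,2} and {3,4} within comm2 are 2 and 2, so the
# projection distance of comm1 from comm2 is 5 - (2 + 2) = 1; the reverse
# direction also gives 1, hence a split-join distance of 2.
#
#   >>> split_join_distance([0, 0, 0, 1, 1], [0, 0, 1, 1, 1])
#   (1, 1)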
|
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
| 626 | 652 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
# Plain clusterings carry no graph reference, so build the cover from the
# clusters themselves and keep the number of covered elements.
return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specifies the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
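# Hedged sketch (the "age" attribute and its values are invented for
# illustration):
#
#   >>> from igraph import Graph
#   >>> g = Graph.Full(4)
#   >>> g.vs["age"] = [15, 27, 38, 12]
#   >>> cl = VertexClustering.FromAttribute(g, "age", intervals=(20, 30))
#   >>> cl.membership           # bins: below 20, 20-30, 30 and above
#   [0, 1, 2, 0]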
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
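# Hedged sketch (two triangles joined by two edges, built inline for
# illustration): with combine_edges=False every original edge survives, the
# intra-cluster ones as self-loops.
#
#   >>> from igraph import Graph
#   >>> g = Graph([(0, 1), (0, 2), (1, 2), (3, 4), (3, 5), (4, 5), (0, 3), (1, 4)])
#   >>> cl = VertexClustering(g, [0, 0, 0, 1, 1, 1])
#   >>> cg = cl.cluster_graph(combine_edges=False)
#   >>> cg.vcount(), cg.ecount()
#   (2, 8)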
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
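# Hedged sketch: community_multilevel() comes from the wider igraph API and is
# only used here to obtain a sensible membership vector for the example.
#
#   >>> from igraph import Graph
#   >>> g = Graph.Famous("Zachary")
#   >>> cl = VertexClustering(g, g.community_multilevel().membership)
#   >>> q = cl.modularity                  # computed lazily on first access
#   >>> q == cl.recalculate_modularity()   # unchanged graph, same score
#   True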
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
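# Hedged sketch (two triangles joined by a single edge, built inline):
#
#   >>> from igraph import Graph
#   >>> g = Graph([(0, 1), (0, 2), (1, 2), (3, 4), (3, 5), (4, 5), (0, 3)])
#   >>> cl = VertexClustering(g, [0, 0, 0, 1, 1, 1])
#   >>> cl.sizes()
#   [3, 3]
#   >>> cl.giant().vcount()     # ties between equally large communities are broken arbitrarily
#   3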
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
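# Illustrative sketch (hand-traced, not from the source): for the five-leaf
# example in the class docstring the traversal returns the leaves in an order
# that lets the branches be drawn without crossings.
#
#   >>> Dendrogram([(0, 1), (3, 4), (2, 5), (6, 7)])._traverse_inorder()
#   [1, 0, 2, 4, 3]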
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
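# Illustrative sketch of summary(): build the five-element dendrogram from the
# class docstring and print its ASCII drawing (verbosity=0 would print only the
# element and merge counts); the exact picture is best seen by running it.
# >>> d = Dendrogram([(0, 1), (3, 4), (2, 5), (6, 7)])
# >>> print d.summary(verbosity=1, max_leaf_count=10)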
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
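# Usage sketch: a VertexDendrogram is normally produced by one of the
# hierarchical community detection methods and then cut into a flat clustering.
# >>> from igraph import Graph
# >>> g = Graph.Famous("Zachary")
# >>> dendro = g.community_fastgreedy()      # returns a VertexDendrogram
# >>> clusters = dendro.as_clustering()      # cut at the optimal level
# >>> three = dendro.as_clustering(3)        # or force exactly three clusters
# >>> len(three)
# 3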
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
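# Quick sketch using the cover from the class docstring:
# >>> cl = Cover([[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]])
# >>> cl.sizes()
# [4, 3, 3]
# >>> cl.sizes(1, 2)
# [3, 3]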
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
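# Minimal sketch on a four-vertex path; only the middle edge runs between the
# two (here disjoint) vertex groups, so only that edge is reported as crossing.
# The booleans are returned in the order the edges were given.
# >>> from igraph import Graph
# >>> g = Graph([(0, 1), (1, 2), (2, 3)])
# >>> VertexCover(g, [[0, 1], [2, 3]]).crossing()
# [False, True, False]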
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
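# Usage sketch: cohesive blocks are normally computed straight from a graph and
# then inspected through the methods of this class (the exact blocks and
# cohesion values depend on the graph, so no outputs are shown here).
# >>> from igraph import Graph
# >>> g = Graph.Famous("Zachary")
# >>> blocks = g.cohesive_blocks()
# >>> blocks.cohesions()        # one cohesion value per block
# >>> blocks.hierarchy()        # directed tree of block containment
# >>> blocks.parents()          # parent index of each block (None for the root)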
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
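# Sketch of the remove_none behaviour (internal helper): positions where either
# membership vector holds None are swapped to the end and truncated away, so
# the two returned vectors stay aligned element by element.
# >>> _prepare_community_comparison([0, 0, None, 1], [0, 1, 1, None], remove_none=True)
# ([0, 0], [0, 1])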
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
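# Minimal sketch: identical partitions (up to relabelling of the cluster IDs)
# are at distance zero under the variation of information metric and at
# similarity 1.0 under the Rand index.
# >>> compare_communities([0, 0, 1, 1], [1, 1, 0, 0], method="vi")
# 0.0
# >>> compare_communities([0, 0, 1, 1], [1, 1, 0, 0], method="rand")
# 1.0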
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
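# Minimal sketch: the second partition below moves vertex 2 into the first
# cluster, so each projection distance is 1 and the split-join distance is 2.
# >>> split_join_distance([0, 0, 1, 1], [0, 0, 0, 1])
# (1, 1)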
|
sizes
|
Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
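# Quick sketch using the clustering from the class docstring:
# >>> cl = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2])
# >>> cl.sizes()
# [4, 3, 4]
# >>> cl.sizes(2)
# [4]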
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
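# Illustrative sketch ("department", "sales" and "dev" are made-up attribute
# values used only for the example): vertices sharing the same attribute value
# end up in the same cluster.
# >>> from igraph import Graph
# >>> g = Graph.Full(4)
# >>> g.vs["department"] = ["sales", "sales", "dev", "dev"]
# >>> VertexClustering.FromAttribute(g, "department").membership
# [0, 0, 1, 1]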
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
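# Minimal sketch: contracting a complete graph on four vertices that has been
# split into two clusters of two. With the default edge combination the result
# has one vertex per cluster and a single edge between them.
# >>> from igraph import Graph
# >>> g = Graph.Full(4)
# >>> cl = VertexClustering(g, [0, 0, 1, 1])
# >>> cg = cl.cluster_graph()
# >>> cg.vcount(), cg.ecount()
# (2, 1)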
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
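# Quick sketch: with three vertices in cluster 0 and one in cluster 1, the
# giant community is the three-vertex one.
# >>> from igraph import Graph
# >>> g = Graph.Full(4)
# >>> cl = VertexClustering(g, [0, 0, 0, 1])
# >>> cl.giant().vcount()
# 3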
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(idxi)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leaves and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leaves and branches.
@param max_leaf_count: the maximal number of leaves to print in the
ASCII representation. If the dendrogram has more leaves than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
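# Example (hedged sketch): a VertexDendrogram is usually obtained from a
# hierarchical community detection method such as Graph.community_fastgreedy();
# the graph below is an assumption for illustration, assuming igraph's Graph
# class has been imported.
#
#     g = Graph.Famous("Zachary")
#     dendro = g.community_fastgreedy()
#     clusters = dendro.as_clustering()      # cut at the optimal count
#     clusters3 = dendro.as_clustering(3)    # or request exactly three clusters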
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
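# Sketch of the optimal_count hint (assumed workflow, for illustration only):
#
#     dendro.optimal_count        # modularity-maximising cut if no hint was given
#     dendro.optimal_count = 4    # override the hint; subsequent
#     dendro.as_clustering()      # ...calls will cut at four clusters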
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API to L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
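# Worked example (editor's note): passing n larger than the highest item ID
# leaves the extra elements uncovered, which shows up as empty lists in the
# membership vector.
#
#     c = Cover([[0, 1], [1, 2]], n=5)
#     c.membership   # -> [[0], [0, 1], [1], [], []]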
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
# MASKED: sizes function (lines 1156-1164)
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
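# Small example (sketch): cluster sizes of the cover used in the class
# docstring above.
#
#     c = Cover([[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]])
#     c.sizes()      # -> [4, 3, 3]
#     c.sizes(1, 2)  # -> [3, 3]
#     c.size(0)      # -> 4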
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
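# Usage sketch (assumed graph, for illustration): count how many edges run
# entirely between clusters of a cover.
#
#     g = Graph.Full(4)                        # assuming igraph's Graph class
#     vc = VertexCover(g, [[0, 1], [1, 2, 3]])
#     sum(vc.crossing())   # edges whose endpoints share no cluster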
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}.
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
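# Plotting sketch (hedged; assumes igraph's plot() helper and a previously
# constructed VertexCover vc):
#
#     plot(vc, mark_groups=True)                   # one colored blob per cluster
#     plot(vc, mark_groups={0: "red", 1: "blue"})  # explicit cluster-index -> color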
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
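# Example sketch (assumes Graph.cohesive_blocks() as described above):
#
#     blocks = g.cohesive_blocks()
#     tree = blocks.hierarchy()   # directed tree; edges point from parent block
#                                 # towards the nested blocks it contains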
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
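# Usage sketch (illustrative membership vectors; the numeric results come from
# the C core and are not reproduced here):
#
#     compare_communities([0, 0, 1, 1], [0, 0, 1, 1])                 # identical partitions -> 0.0 for "vi"
#     compare_communities([0, 0, 1, 1], [0, 1, 1, 2], method="nmi")
#     compare_communities(cl1, cl2, method="rand", remove_none=True)  # cl1, cl2: Clustering objects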
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
distance of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. They are
presented separately because a zero value for either element implies that
one of the partitions is a subpartition of the other (and a value close to
zero means that one of the partitions is close to being a subpartition of
the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
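# Worked example (editor's sketch, following the definition above):
#
#     comm1 = [0, 0, 0, 1, 1, 1]     # sets {0,1,2}, {3,4,5}
#     comm2 = [0, 0, 1, 1, 2, 2]     # sets {0,1}, {2,3}, {4,5}
#     split_join_distance(comm1, comm2)
#
# The best overlaps of comm1's sets within comm2 are 2 and 2, giving a
# projection distance of 6 - 4 = 2; the best overlaps of comm2's sets within
# comm1 are 2, 1 and 2, giving 6 - 5 = 1. Per the docstring, the returned pair
# is therefore (2, 1) and the split-join distance is 3.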
|
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
| 1,156 | 1,164 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
# The base Clustering class has no _graph attribute; build the cover
# directly from the clusters (iterating a Clustering yields its clusters).
return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
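# Quick example (sketch): sizes of the clustering from the class docstring.
#
#     cl = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2])
#     cl.sizes()    # -> [4, 3, 4]
#     cl.sizes(1)   # -> [3]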
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specifies the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
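# Example sketch (the "age" attribute and the graph g are assumptions for
# illustration only):
#
#     g.vs["age"] = [15, 27, 42, 35, 8]
#     cl = VertexClustering.FromAttribute(g, "age", intervals=[18, 40])
#     # bins: age < 18, 18 <= age < 40, age >= 40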
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
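# Usage sketch (hedged): contract each cluster to a single vertex; passing
# combine_edges=False keeps one edge per original inter-cluster edge.
#
#     meta = cl.cluster_graph(combine_edges=False)
#     meta.vcount() == len(cl)   # one vertex per cluster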
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
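# Sketch of the intended workflow (illustrative):
#
#     cl.q                          # cached (or lazily computed) modularity
#     g.add_edges([(0, 5)])         # the underlying graph changes...
#     cl.recalculate_modularity()   # ...so refresh the cached value
#     cl.q                          # up-to-date modularity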
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities; in that case, this method
returns a copy of an arbitrary one.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
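# Example sketch: grab the largest community as a standalone graph.
#
#     big = cl.giant()
#     big.vcount() == max(cl.sizes())   # its size equals the largest cluster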
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}.
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
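# Plotting sketch (hedged; assumes igraph's plot() helper and a previously
# constructed VertexClustering cl): vertices are colored by membership
# automatically, and groups can be outlined.
#
#     plot(cl, mark_groups=True, vertex_size=10)
#     plot(cl, mark_groups=[0, 2])   # highlight only clusters 0 and 2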
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
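# Worked example (editor's sketch) matching the merge matrix from the class
# docstring above: five leaves (0-4) and four merges creating IDs 5-8.
#
#     d = Dendrogram([[0, 1], [3, 4], [2, 5], [6, 7]])
#     d.format()   # -> '((3,4)6,(2,(0,1)5)7)8;'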
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leaves and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leaves and branches.
@param max_leaf_count: the maximal number of leaves to print in the
ASCII representation. If the dendrogram has more leaves than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
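# Plotting sketch (not part of the original source; requires the optional Cairo
# plotting backend). The orientation keyword documented above is forwarded by
# igraph.plot() to this method:
#
#     from igraph import plot
#     plot(dendrogram, orientation="top-bottom")  # leaves on top, merges grow downwards
#     plot(dendrogram, orientation="lr")          # the default left-to-right layout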
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
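# Usage sketch (not part of the original source; assumes python-igraph's
# Graph.community_fastgreedy(), which returns a VertexDendrogram):
#
#     from igraph import Graph
#     g = Graph.Famous("Zachary")
#     dendrogram = g.community_fastgreedy()
#     clusters = dendrogram.as_clustering()        # cut at the optimal count
#     four_clusters = dendrogram.as_clustering(4)  # cut into exactly four clusters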
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
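# Illustration (not part of the original source): the hint can be inspected or
# overridden before cutting the dendrogram.
#
#     print dendrogram.optimal_count          # modularity-based if no hint was supplied
#     dendrogram.optimal_count = 3            # override the hint manually
#     clusters = dendrogram.as_clustering()   # now cuts into three clusters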
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
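# Example sketch (not part of the original source; g is an igraph Graph with at
# least five vertices): edges running between two groups of the cover can be
# styled differently, e.g. for plotting.
#
#     cover = VertexCover(g, [[0, 1, 2], [2, 3, 4]])
#     external = cover.crossing()              # one boolean per edge
#     g.es["color"] = ["red" if x else "black" for x in external]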
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
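# Plotting sketch (not part of the original source; requires the Cairo backend):
#
#     from igraph import plot
#     plot(cover, mark_groups=True)         # draw a colored blob around every cluster
#     plot(cover, mark_groups={0: "red"})   # highlight cluster 0 only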
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
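# Usage sketch (not part of the original source); Graph.cohesive_blocks() is the
# usual entry point:
#
#     blocks = g.cohesive_blocks()          # g is an igraph Graph
#     tree = blocks.hierarchy()             # directed containment tree of the blocks
#     root = blocks.parents().index(None)   # the block that covers the whole graph
#     print blocks.cohesions()              # vertex connectivity of each block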
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
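# Behaviour sketch (not part of the original source): with remove_none=True,
# positions that are None in either vector are dropped from both sides before
# the comparison, e.g.
#
#     _prepare_community_comparison([0, 1, None], [0, None, 1], remove_none=True)
#     # -> ([0], [0])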
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
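# Usage sketch (not part of the original source):
#
#     comm1 = [0, 0, 0, 1, 1, 1]
#     comm2 = [0, 0, 1, 1, 2, 2]
#     vi  = compare_communities(comm1, comm2)                  # variation of information
#     nmi = compare_communities(comm1, comm2, method="nmi")    # normalized mutual information
#     ari = compare_communities(comm1, comm2, method="adjusted_rand")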
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
distance of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
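# Usage sketch (not part of the original source): the two returned values are the
# per-direction projection distances; their sum equals
# compare_communities(comm1, comm2, method="split-join").
#
#     d12, d21 = split_join_distance(comm1, comm2)
#     total = d12 + d21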
|
summary
|
Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
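# Usage sketch (not part of the original source; g is an igraph Graph with six
# vertices and "age" is a hypothetical vertex attribute):
#
#     g.vs["age"] = [15, 27, 31, 46, 52, 69]
#     by_decade = VertexClustering.FromAttribute(g, "age", intervals=10)
#     by_bins = VertexClustering.FromAttribute(g, "age", intervals=[30, 50])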
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
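# Usage sketch (not part of the original source; assumes the graph carries a
# "weight" edge attribute):
#
#     clustering = g.community_multilevel()
#     quotient = clustering.cluster_graph(combine_edges=dict(weight="sum"))
#     multigraph = clustering.cluster_graph(combine_edges=False)  # keep parallel edges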
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
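# Usage sketch (not part of the original source): after mutating the graph, the
# cached modularity has to be refreshed explicitly.
#
#     clustering = g.community_multilevel()
#     g.add_edges([(0, 5)])
#     print clustering.recalculate_modularity()   # reflects the modified graph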
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities; if so, this method
returns a copy of an arbitrary one of them.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
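# Usage sketch (not part of the original source):
#
#     clustering = g.community_multilevel()
#     largest = clustering.giant()     # copy of the largest community as a subgraph
#     print largest.vcount(), largest.ecount()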
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
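# Illustration (not part of the original source), using the merge matrix from
# the class docstring above:
#
#     d = Dendrogram([[0, 1], [3, 4], [2, 5], [6, 7]])
#     d._nitems    # -> 5  (five original elements)
#     d._nmerges   # -> 4  (four merges, creating groups 5, 6, 7 and 8)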
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(idxi)  # the merged group lives at the slot of its first member
return [x for x in tuple_repr if x is not None]
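# Illustration (not part of the original source; assumes the correction to
# idxs.append() above):
#
#     Dendrogram._convert_matrix_to_tuple_repr([(0, 1), (2, 3), (4, 5)])
#     # -> [((0, 1), (2, 3))]  -- leaves 0..3 merged pairwise, then together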
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is smaller than the largest item ID found in the
clusters plus one, the latter value will be used instead, so it is safe
to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
# MASKED: summary function (lines 1174-1198)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}.
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be exactly one block without a parent (the root block), and it
covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
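# Illustrative usage sketch, not part of the original igraph module: the
# Zachary karate club graph is an arbitrary example; cohesive_blocks() is the
# Graph method that produces CohesiveBlocks instances.
def _example_cohesive_blocks():
    """Computes and inspects the cohesive block structure of a small graph."""
    from igraph import Graph
    blocks = Graph.Famous("Zachary").cohesive_blocks()
    cohesions = blocks.cohesions()       # vertex connectivity of each block
    tree = blocks.hierarchy()            # block containment tree as a Graph
    per_vertex = blocks.max_cohesions()  # best cohesion seen by each vertex
    return cohesions, tree, per_vertex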
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
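# Illustrative sketch, not part of the original igraph module, of how the
# helper above normalizes the accepted mark_groups forms into
# (vertex group, color) pairs; the toy clustering is an arbitrary assumption.
def _example_mark_groups_resolution():
    """Resolves a few mark_groups variants against a toy clustering."""
    clustering = Clustering([0, 0, 1, 1, 2])
    # True: every cluster, colored by its own index
    all_groups = list(_handle_mark_groups_arg_for_clustering(True, clustering))
    # a list of cluster indices: clusters 0 and 2 get colors 0 and 1
    some = list(_handle_mark_groups_arg_for_clustering([0, 2], clustering))
    # a dict mapping a cluster index to an explicit color name
    named = list(_handle_mark_groups_arg_for_clustering({1: "red"}, clustering))
    return all_groups, some, named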
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
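# Illustrative sketch, not part of the original igraph module, of the
# remove_none behaviour used by the comparison functions below: positions
# where either vector is None are dropped before the comparison.
def _example_remove_none():
    """Filters unlabelled (None) items out of two membership vectors."""
    vec1, vec2 = _prepare_community_comparison(
        [0, 0, None, 1, 1], [0, 0, 1, 1, None], remove_none=True)
    # Only the positions labelled in both vectors survive (indices 0, 1, 3),
    # so both results are [0, 0, 1] here.
    return vec1, vec2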
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
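# Illustrative usage sketch, not part of the original igraph module. The two
# membership lists are arbitrary; they describe the same partition with the
# labels swapped, so the metrics report perfect agreement.
def _example_compare_communities():
    """Compares two membership vectors with two of the supported metrics."""
    a = [0, 0, 0, 1, 1, 1]
    b = [1, 1, 1, 0, 0, 0]                           # same partition, relabeled
    vi = compare_communities(a, b, method="vi")      # 0.0 for identical partitions
    rand = compare_communities(a, b, method="rand")  # 1.0 for identical partitions
    return vi, rand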
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
distance of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric; that is why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
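# Illustrative usage sketch, not part of the original igraph module. The two
# partitions are arbitrary; the split-join distance itself is the sum of the
# two projection distances returned here.
def _example_split_join_distance():
    """Shows the asymmetric projection distances behind the split-join distance."""
    a = [0, 0, 0, 1, 1, 1]
    b = [0, 0, 1, 1, 2, 2]
    d_ab, d_ba = split_join_distance(a, b)
    # d_ab + d_ba equals compare_communities(a, b, method="split-join")
    return d_ab, d_ba, d_ab + d_ba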
|
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
| 1,174 | 1,198 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities; this method will return
a copy of an arbitrary one of them.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}.
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from zero. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs smaller than M{n} (where M{n} is the number of elements
in the dataset) mean the original members of the dataset (with IDs from
0 to M{n-1}), while IDs of M{n} and above mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leaves and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leaves and branches.
@param max_leaf_count: the maximal number of leaves to print in the
ASCII representation. If the dendrogram has more leaves than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
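# Illustrative usage sketch, not part of the original igraph module: the merge
# matrix below joins five leaves exactly as in the ASCII figure of the class
# docstring.
def _example_dendrogram():
    """Builds a small Dendrogram by hand and inspects it."""
    dendro = Dendrogram([(0, 1), (3, 4), (2, 5), (6, 7)])
    merges = dendro.merges               # defensive copy of the merge matrix
    newick = dendro.format("newick")     # '((3,4)6,(2,(0,1)5)7)8;'
    text = dendro.summary(verbosity=1)   # ASCII art of the merge history
    return merges, newick, text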
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
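    # Illustrative sketch (not part of the original module): an edge of a
    # cover is "crossing" iff its endpoints share no cluster at all.  On a
    # triangle covered by the two overlapping clusters [0, 1] and [1, 2],
    # only the edge between vertices 0 and 2 crosses:
    #
    #     >>> g = Graph(3, [(0, 1), (1, 2), (0, 2)])
    #     >>> VertexCover(g, [[0, 1], [1, 2]]).crossing()
    #     [False, False, True]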
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
            (see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
    # tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
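# Illustrative sketch (not part of the original module): a plain list of
# cluster indices passed as mark_groups is converted into (group, color)
# pairs, with each numeric group resolved to the member list of that cluster:
#
#     >>> cl = Clustering([0, 0, 1, 2, 2])
#     >>> list(_handle_mark_groups_arg_for_clustering([0, 2], cl))
#     [([0, 1], 0), ([3, 4], 1)]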
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
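# Illustrative sketch (not part of the original module): with remove_none=True
# every position that is C{None} in either vector is swapped to the tail and
# truncated, so only the positions defined in both clusterings are compared:
#
#     >>> _prepare_community_comparison([0, None, 1, 1], [0, 0, None, 1],
#     ...                               remove_none=True)
#     ([0, 1], [0, 1])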
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
          and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
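# Illustrative sketch (not part of the original module): two partitions that
# differ only in the labelling of their clusters are identical for every
# supported measure, e.g. zero variation of information and an NMI of one:
#
#     >>> compare_communities([0, 0, 1, 1], [1, 1, 0, 0], method="vi")
#     0.0
#     >>> compare_communities([0, 0, 1, 1], [1, 1, 0, 0], method="nmi")
#     1.0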
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
        number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
        Note that the projection distance is asymmetric; this is why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
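# Illustrative sketch (not part of the original module): when the second
# partition refines the first, its projection distance from the first is
# zero, so per the definition above the call is expected to yield:
#
#     >>> split_join_distance([0, 0, 1, 1], [0, 1, 2, 3])
#     (2, 0)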
|
__init__
|
Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
        @param membership: the membership list -- that is, the index of
          the cluster to which each element of the set belongs.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
        return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
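    # Illustrative sketch (not part of the original module): sizes() counts
    # cluster members in a single pass over the membership vector and can
    # also be queried for selected clusters only:
    #
    #     >>> cl = Clustering([0, 0, 0, 1, 1, 2])
    #     >>> cl.sizes()
    #     [3, 2, 1]
    #     >>> cl.sizes(2, 0)
    #     [1, 3]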
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
          specifies the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
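    # Illustrative sketch (not part of the original module): binning a numeric
    # vertex attribute with a single interval width of 10 puts values 0-9 into
    # the first cluster, 10-19 into the next, and so on.  The attribute name
    # "age" below is a made-up example:
    #
    #     >>> g = Graph.Full(4)
    #     >>> g.vs["age"] = [3, 7, 12, 25]
    #     >>> VertexClustering.FromAttribute(g, "age", intervals=10).membership
    #     [0, 0, 1, 2]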
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
        clustering. Vertices M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
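    # Illustrative sketch (not part of the original module): contracting a
    # four-cycle clustered into two pairs keeps every original edge (including
    # the ones that become loops) when combine_edges=False is given:
    #
    #     >>> g = Graph(4, [(0, 1), (1, 2), (2, 3), (3, 0)])
    #     >>> cg = VertexClustering(g, [0, 0, 1, 1]).cluster_graph(combine_edges=False)
    #     >>> cg.vcount(), cg.ecount()
    #     (2, 4)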
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
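    # Illustrative sketch (not part of the original module): in a clustering an
    # edge crosses iff its two endpoints received different cluster indices:
    #
    #     >>> g = Graph(4, [(0, 1), (1, 2), (2, 3)])
    #     >>> VertexClustering(g, [0, 0, 1, 1]).crossing()
    #     [False, True, False]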
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
        @return: the new modularity score, or C{None} if the modularity score
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
        The giant community is a community for which no larger community exists.
        @note: there can be multiple giant communities; this method returns
          a copy of an arbitrary one of them.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
            (see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
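    # Illustrative sketch (not part of the original module): for the merge
    # matrix [[0, 1], [2, 3]] over three leaves (leaf IDs 0-2, merge node IDs
    # 3-4), the traversal starts at the root, expands merge nodes depth-first
    # and collects the leaves in the order they should be drawn:
    #
    #     >>> Dendrogram([[0, 1], [2, 3]])._traverse_inorder()
    #     [1, 0, 2]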
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
        The summary includes the number of leaves and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
          of leaves and branches.
        @param max_leaf_count: the maximal number of leaves to print in the
          ASCII representation. If the dendrogram has more leaves than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
          will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# MASKED: __init__ function (lines 1221-1236)
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
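# A minimal sketch of crossing() on a cover; the toy graph and the covers below
# are assumptions for illustration. An edge only counts as "crossing" when its
# two endpoints share no cluster at all:
#
#     >>> from igraph import Graph
#     >>> g = Graph(4, [(0, 1), (1, 2), (2, 3)])
#     >>> VertexCover(g, [[0, 1, 2], [2, 3]]).crossing()   # vertex 2 bridges both clusters
#     [False, False, False]
#     >>> VertexCover(g, [[0, 1], [2, 3]]).crossing()      # edge (1, 2) runs between clusters
#     [False, True, False]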
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
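# A brief usage sketch; the Zachary karate club graph is just an assumed
# example input:
#
#     >>> from igraph import Graph
#     >>> blocks = Graph.Famous("Zachary").cohesive_blocks()
#     >>> tree = blocks.hierarchy()   # one vertex per block, edges point from parent to child
#     >>> tree.is_dag()
#     True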
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
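# Sketch of how the per-vertex cohesion helpers are typically used, e.g. to
# colour vertices by the best block they belong to. The graph choice is an
# assumption and the concrete scores depend on the data:
#
#     >>> from igraph import Graph
#     >>> blocks = Graph.Famous("Zachary").cohesive_blocks()
#     >>> blocks.max_cohesion(0)            # best cohesion among blocks containing vertex 0
#     >>> scores = blocks.max_cohesions()   # one score per vertex; also the default vertex colours in __plot__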
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
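# Illustrative sketch of what the resolver yields; ``some_graph`` is a
# hypothetical four-vertex Graph used only for this example:
#
#     >>> cl = VertexClustering(some_graph, [0, 0, 1, 1])
#     >>> list(_handle_mark_groups_arg_for_clustering({0: "red"}, cl))
#     [([0, 1], 'red')]
#
# i.e. the numeric cluster ID 0 is resolved to the list of its member vertices,
# while explicit vertex lists and colours are passed through unchanged.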
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
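# Sketch of the None-filtering behaviour (inputs are illustrative). Items whose
# membership is None in either vector are dropped from both, so the pairing of
# the remaining items is preserved even though their order may change:
#
#     >>> _prepare_community_comparison([0, None, 1], [0, 0, 1], remove_none=True)
#     ([0, 1], [0, 1])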
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workship, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
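# Usage sketch; the membership vectors are illustrative and the expected values
# follow from the definitions of the measures rather than from a recorded run:
#
#     >>> compare_communities([0, 0, 1, 1], [1, 1, 0, 0], method="vi")          # same partition, relabelled -> 0.0
#     >>> compare_communities([0, 0, 1, 1], [0, 0, 1, 2], method="split-join")  # one extra split -> 1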
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric; this is why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
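# Usage sketch (illustrative input; the values follow from the definition above):
#
#     >>> split_join_distance([0, 0, 1, 1], [0, 0, 1, 2])   # -> (1, 0)
#
# The zero second element signals that the second partition is a subpartition
# (a refinement) of the first one.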
|
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
| 1,221 | 1,236 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index to which each element of the set belongs.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
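# Quick sketch of the size helpers, reusing the clustering from the class
# docstring:
#
#     >>> cl = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2])
#     >>> cl.sizes()        # -> [4, 3, 4]
#     >>> cl.sizes(2, 0)    # -> [4, 4]
#     >>> cl.size(1)        # -> 3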
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
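# A minimal construction sketch; the toy graph and membership are assumptions:
#
#     >>> from igraph import Graph
#     >>> g = Graph(4, [(0, 1), (2, 3)])
#     >>> cl = VertexClustering(g, [0, 0, 1, 1])
#     >>> cl.modularity         # computed lazily from the graph; 0.5 for this toy example
#     >>> len(cl.subgraphs())   # one two-vertex subgraph per cluster -> 2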
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
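# Usage sketch for FromAttribute; the attribute values are assumptions, and
# cluster IDs are assigned in order of first appearance:
#
#     >>> from igraph import Graph
#     >>> g = Graph.Full(4)
#     >>> g.vs["gender"] = ["M", "F", "F", "M"]
#     >>> cl = VertexClustering.FromAttribute(g, "gender")
#     >>> cl.membership        # vertices 0 and 3 share one cluster, 1 and 2 the other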
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
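# A small sketch of cluster_graph(); the toy graph (two triangles joined by a
# single edge) is an assumption for illustration:
#
#     >>> from igraph import Graph
#     >>> g = Graph(6, [(0, 1), (1, 2), (0, 2), (3, 4), (4, 5), (3, 5), (0, 3)])
#     >>> cl = VertexClustering(g, [0, 0, 0, 1, 1, 1])
#     >>> cg = cl.cluster_graph(combine_edges=False)
#     >>> cg.vcount(), cg.ecount()
#     (2, 7)
#
# With combine_edges=False the three intra-cluster edges of each triangle become
# self-loops, and the single inter-cluster edge connects the two contracted vertices.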
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
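# Sketch of giant() on the connected components of a toy graph (an assumption
# for illustration):
#
#     >>> from igraph import Graph
#     >>> g = Graph(5, [(0, 1), (1, 2), (3, 4)])
#     >>> g.clusters().giant().vcount()   # the largest component has three vertices
#     3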
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with two
columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in merge step M{i}, with M{i} starting from zero. The
joint group is assigned the ID M{n+i}, where M{n} is the number of
original elements, and this ID is referenced in the upcoming steps
instead of any of its individual members. So, IDs less than M{n} mean
the original members of the dataset (with IDs from 0 to M{n-1}), while
IDs of M{n} and above mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(idxi)  # the ID of the new group maps to the slot holding the merged tuple
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
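# Typical usage sketch with a community detection dendrogram; the graph is an
# assumed example and the optimal cut depends on the data:
#
#     >>> from igraph import Graph
#     >>> dendro = Graph.Famous("Zachary").community_fastgreedy()
#     >>> cl = dendro.as_clustering()     # cut at the optimal (hinted or modularity-maximising) level
#     >>> cl3 = dendro.as_clustering(3)   # force exactly three clusters
#     >>> len(cl3)
#     3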
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
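# Illustrative sketch (hypothetical helper; the graph choice is an example
# only): CohesiveBlocks objects are normally produced by
# Graph.cohesive_blocks(), and the accessors defined above fit together as
# follows.
def _cohesive_blocks_example():
    from igraph import Graph
    g = Graph.Famous("Zachary")
    blocks = g.cohesive_blocks()          # a CohesiveBlocks instance
    sizes = blocks.sizes()                # size of every block
    cohesions = blocks.cohesions()        # cohesion (connectivity) per block
    tree = blocks.hierarchy()             # containment tree; edges point from
                                          # parent blocks towards sub-blocks
    per_vertex = blocks.max_cohesions()   # best cohesion each vertex reaches
    return sizes, cohesions, tree, per_vertex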
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
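# Illustrative sketch (hypothetical helper, not part of the original module):
# the function above normalises the many accepted forms of mark_groups into
# (vertex group, color) pairs, resolving bare cluster indices through the
# clustering. A small trace of that behaviour:
def _mark_groups_resolution_example():
    cl = Clustering([0, 0, 1, 1])
    # A list of cluster indices gets color indices assigned automatically ...
    pairs = list(_handle_mark_groups_arg_for_clustering([0, 1], cl))
    assert pairs == [([0, 1], 0), ([2, 3], 1)]
    # ... while an explicit dict maps cluster indices (or vertex tuples) to
    # colors of your choice.
    pairs = list(_handle_mark_groups_arg_for_clustering({1: "red"}, cl))
    assert pairs == [([2, 3], "red")]
    return pairs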
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
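# Illustrative sketch (hypothetical helper, not part of the original module):
# _prepare_community_comparison removes None entries pairwise, i.e. whenever
# either membership vector has None at position i, that position is dropped
# from both vectors before the comparison. For example:
def _prepare_comparison_example():
    vec1, vec2 = _prepare_community_comparison(
        [0, None, 1], [1, 2, None], remove_none=True)
    assert (vec1, vec2) == ([0], [1])   # positions 1 and 2 were dropped
    return vec1, vec2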
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
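# Illustrative sketch (hypothetical helper, not part of the original module):
# compare_communities() accepts either membership lists or Clustering objects
# and dispatches to the C core for the actual measure. Identical partitions
# yield the extreme values of each measure; small tolerances are used below
# because the C core returns floats.
def _compare_communities_example():
    a = [0, 0, 0, 1, 1, 1]
    assert abs(compare_communities(a, a, method="vi")) < 1e-9         # distance
    assert abs(compare_communities(a, a, method="rand") - 1.0) < 1e-9 # similarity
    # Relabelled but otherwise identical partitions compare as equal, too.
    b = [1, 1, 1, 0, 0, 0]
    assert abs(compare_communities(a, b, method="nmi") - 1.0) < 1e-9
    return compare_communities(a, b, method="split-join")  # 0 for equal partitions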
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
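# Illustrative sketch (hypothetical helper, not part of the original module):
# a worked example of the projection distances described above. With
#   comm1 = [0, 0, 0, 1, 1, 1]   -> clusters {0,1,2}, {3,4,5}
#   comm2 = [0, 0, 1, 1, 2, 2]   -> clusters {0,1}, {2,3}, {4,5}
# the best overlaps of comm1's clusters inside comm2 are 2 and 2, so the
# projection distance of comm1 from comm2 is 6 - (2 + 2) = 2; the best
# overlaps of comm2's clusters inside comm1 are 2, 1 and 2, giving
# 6 - (2 + 1 + 2) = 1. The split-join distance is therefore 2 + 1 = 3.
def _split_join_example():
    d12, d21 = split_join_distance([0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 2, 2])
    assert d12 + d21 == 3
    return d12, d21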
|
__init__
|
Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
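# Illustrative sketch (hypothetical helper, not part of the original module):
# the class docstring explains the ID scheme of the merge matrix -- leaves are
# numbered first and the i-th merge creates the next internal node ID. Reusing
# the example from Dendrogram.format()'s doctest:
def _dendrogram_ids_example():
    d = Dendrogram([(2, 3), (0, 1), (4, 5)])
    # Four leaves (0..3); the merges create internal nodes 4, 5 and 6, and the
    # last merge joins the two earlier merge nodes.
    assert d.merges == [(2, 3), (0, 1), (4, 5)]
    assert d.format() == '((2,3)4,(0,1)5)6;'
    d.names = list("ABCD")                 # unnamed internal nodes stay blank
    assert d.format() == '((C,D),(A,B));'
    return d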
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
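# Illustrative sketch (hypothetical helper; the graph and community detection
# method below are just one possible choice): VertexDendrogram instances
# usually come from hierarchical community detection, and as_clustering()
# cuts the merge history at a given (or the optimal) number of clusters.
def _vertex_dendrogram_example():
    from igraph import Graph
    g = Graph.Famous("Zachary")
    dendro = g.community_fastgreedy()        # returns a VertexDendrogram
    best = dendro.as_clustering()            # cut at dendro.optimal_count
    three = dendro.as_clustering(3)          # force exactly three clusters
    assert len(three) == 3
    assert len(best) == dendro.optimal_count
    return best, three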
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
            (see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
# MASKED: __init__ function (lines 1360-1389)
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
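        # Every group that has a parent contributes one (parent, child) edge;
        # the root group (whose parent is None) is skipped.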
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
    # tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
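    # Lazily replace numeric cluster IDs in the (group, color) pairs by the
    # member lists of the corresponding clusters.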
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
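        # Remove the flagged positions by swapping them to the end of both
        # vectors (walking the indices in reverse order) and truncating, so
        # the two membership lists stay aligned element by element.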
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
      and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
    @ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
    distance of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
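
    As a small worked example (computed by hand for illustration), take
    C{comm1 = [0, 0, 0, 1, 1, 1]} and C{comm2 = [0, 0, 1, 1, 2, 2]}. The best
    overlaps of the two clusters of C{comm1} with the clusters of C{comm2}
    have sizes 2 and 2, so the projection distance of C{comm1} from C{comm2}
    is M{6 - (2+2) = 2}; in the other direction the best overlaps are 2, 1
    and 2, giving M{6 - (2+1+2) = 1}. The split-join distance itself is
    therefore 3.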
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
    @ref: van Dongen S: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
|
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
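        # Normalise the parent pointers: negative values mean "no parent",
        # i.e. the root block, and are represented by None from here on.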
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
| 1,360 | 1,389 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
        return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
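
        A minimal usage sketch (illustrative only; the graph and attribute
        values below are made up, and the binning rules are described by the
        C{intervals} parameter below):

        >>> from igraph import Graph
        >>> g = Graph.Full(4)
        >>> g.vs["value"] = [5, 15, 25, 35]
        >>> cl = VertexClustering.FromAttribute(g, "value", intervals=[10, 20, 30])
        >>> # each vertex falls into a different bin, so the result consists
        >>> # of four singleton clusters
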
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
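
        A minimal usage sketch (illustrative only; the ring graph below is
        made up):

        >>> from igraph import Graph
        >>> g = Graph.Ring(6)
        >>> cl = VertexClustering(g, [0, 0, 1, 1, 2, 2])
        >>> cg = cl.cluster_graph(combine_edges=False)
        >>> # cg has three vertices, one per cluster; neighbouring clusters
        >>> # stay connected and intra-cluster edges become self-loops
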
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
        The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities, this method will return
the copy of an arbitrary one if there are multiple giant communities.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
            (see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
            # the group created in this step now lives at slot idxi, so later
            # references to it must resolve there
            idxs.append(idxi)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
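        # Iterative depth-first walk of the merge tree, starting from the most
        # recent merge; only leaf nodes (IDs below self._nitems) are collected,
        # in the left-to-right order in which they should be drawn.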
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
        The summary includes the number of leaves and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
          of leaves and branches.
        @param max_leaf_count: the maximal number of leaves to print in the
          ASCII representation. If the dendrogram has more leaves than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
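        # Draw the dendrogram level by level: first the vertical bars of the
        # branches that are still alive, then every merge whose operands
        # already exist at this level, rendered as a `---' bracket.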
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
        will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
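        # Replay the first (num_elts - n) merges to obtain a membership vector
        # with n clusters, then compact the cluster IDs into 0..n-1.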
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
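        # After `step` merges there are (n - step) clusters left; keep the cut
        # level with the highest modularity.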
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
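        # Collect per-vertex labels (presumably from the "vertex_label" keyword
        # argument or the vertices' own "label" attribute); vertices without a
        # label fall back to their index below.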
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
          by this cover. If it is less than the largest element ID occurring
          in the clusters plus one, that value will be used instead, so it is
          safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
            (see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be exactly one block without a parent, and it covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
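# Illustrative usage sketch (not part of the original module); assumes ``g``
# is a connected igraph Graph instance:
#
#   >>> blocks = g.cohesive_blocks()   # CohesiveBlocks instance
#   >>> tree = blocks.hierarchy()      # one vertex per block, edges point to sub-blocks
#   >>> tree.is_dag()
#   True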
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
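# Illustrative plotting sketch (not part of the original module); requires the
# Cairo-based plotting backend and assumes ``blocks`` is the CohesiveBlocks
# instance from the example above:
#
#   >>> from igraph import plot
#   >>> plot(blocks)   # vertices colored by max_cohesions(), blobs around blocks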
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
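# Illustrative usage sketch (not part of the original module); assumes ``g``
# is an undirected igraph Graph, e.g. Graph.Famous("Zachary"):
#
#   >>> c1 = g.community_multilevel()
#   >>> c2 = g.community_fastgreedy().as_clustering()
#   >>> 0.0 <= compare_communities(c1, c2, method="nmi") <= 1.0
#   True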
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
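# Illustrative usage sketch (not part of the original module); continues the
# compare_communities() example above with the same ``c1`` and ``c2``:
#
#   >>> d12, d21 = split_join_distance(c1, c2)
#   >>> d12 + d21 == compare_communities(c1, c2, method="split-join")
#   True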
|
__plot__
|
Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
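# Illustrative usage sketch (not part of the original module) for the size
# queries above:
#
#   >>> cl = Clustering([0, 0, 0, 1, 1, 2])
#   >>> cl.sizes()
#   [3, 2, 1]
#   >>> cl.size(0)
#   3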
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
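# Illustrative usage sketch (not part of the original module); the attribute
# name "age" and the four-vertex graph ``g`` are made up for the example:
#
#   >>> g.vs["age"] = [15, 27, 34, 62]
#   >>> cl = VertexClustering.FromAttribute(g, "age", intervals=[20, 40, 60])
#   >>> cl.membership   # bins <20, [20,40), [40,60), >=60, re-indexed by first appearance
#   [0, 1, 1, 2]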
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
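# Illustrative usage sketch (not part of the original module); assumes ``cl``
# is a VertexClustering of some graph:
#
#   >>> cg = cl.cluster_graph(combine_edges=False)   # keep parallel edges
#   >>> cg.vcount() == len(cl)
#   True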
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities; this method will return
a copy of an arbitrary one of them.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
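# Illustrative sketch (not part of the original module) of the merge-matrix
# representation described in the class docstring, using the five-leaf
# example shown there:
#
#   >>> d = Dendrogram([[0, 1], [3, 4], [2, 5], [6, 7]])
#   >>> d.merges
#   [(0, 1), (3, 4), (2, 5), (6, 7)]
#   >>> d.format()
#   '((3,4)6,(2,(0,1)5)7)8;'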
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leaves and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leaves and branches.
@param max_leaf_count: the maximal number of leaves to print in the
ASCII representation. If the dendrogram has more leaves than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
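# Illustrative usage sketch (not part of the original module); assumes ``g``
# is an undirected igraph Graph with at least three vertices:
#
#   >>> dendro = g.community_edge_betweenness()   # returns a VertexDendrogram
#   >>> cl3 = dendro.as_clustering(3)             # cut so that three clusters remain
#   >>> len(cl3)
#   3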
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
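# Illustrative usage sketch (not part of the original module): an edge is
# "crossing" iff its endpoints share no cluster of the cover.
#
#   >>> from igraph import Graph
#   >>> g = Graph([(0, 1), (1, 2), (2, 3), (3, 0)])   # a 4-cycle
#   >>> vc = VertexCover(g, [[0, 1, 2], [2, 3]])
#   >>> vc.crossing()
#   [False, False, False, True]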
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be exactly one block without a parent, and it covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
# MASKED: __plot__ function (lines 1440-1467)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
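# Move every position that holds a None in either vector to the tail by
# swapping it with the current last kept element, then truncate both vectors.
# The swaps may reorder the surviving entries, but both vectors are permuted
# identically, so the element-wise pairing (and hence every comparison
# measure computed from it) is unaffected.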
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
distance of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric; that is why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1} as a pair. The actual
split-join distance is the sum of the two distances. The reason it is
presented this way is that if one of the elements is zero, then one of the
partitions is a subpartition of the other (and if it is close to zero,
then one of the partitions is close to being a subpartition of the other).
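As a hand-worked illustration of the definition above, take
C{comm1 = [0, 0, 0, 1, 1, 1]} and C{comm2 = [0, 0, 1, 1, 2, 2]}. Both
clusters of C{comm1} overlap some cluster of C{comm2} in at most two
elements, so the projection distance of C{comm1} from C{comm2} is
M{6 - (2 + 2) = 2}; the three clusters of C{comm2} have maximal overlaps
of 2, 1 and 2 with the clusters of C{comm1}, giving M{6 - 5 = 1} in the
other direction, so the split-join distance itself is M{2 + 1 = 3}.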
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
|
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
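# Mark every block whose cohesion is larger than one. The (index, cohesion)
# pairs double as (group, color) entries for mark_groups, so each marked
# block is colored by its cohesion value; blocks of cohesion zero or one
# are left unmarked.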
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
| 1,440 | 1,467 |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index to which each element of the set belongs.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self, n=self.n)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specifies the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
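For illustration, binning a numeric attribute with explicit boundaries on a
small hypothetical graph (the C{"age"} values are made up):
>>> g = Graph.Full(4)
>>> g.vs["age"] = [15, 27, 36, 42]
>>> cl = VertexClustering.FromAttribute(g, "age", intervals=(20, 40))
>>> cl.membership
[0, 1, 1, 2]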
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
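A small sketch on a hypothetical four-cycle, keeping every original edge
(loops included) by passing C{combine_edges=False}:
>>> g = Graph.Ring(4)
>>> cl = VertexClustering(g, [0, 0, 1, 1])
>>> cg = cl.cluster_graph(combine_edges=False)
>>> cg.vcount(), cg.ecount()
(2, 4)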
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
@return: the new modularity score or C{None} if the modularity function
could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
The giant community is a community for which no larger community exists.
@note: there can be multiple giant communities; this method returns
a copy of an arbitrary one of them.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
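A small illustrative check (the first two items are merged first, then the
result is joined with the third item):
>>> Dendrogram._convert_matrix_to_tuple_repr([(0, 1), (3, 2)])
[((0, 1), 2)]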
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(idxi)   # the newly created group now lives at index idxi in tuple_repr
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
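# Consume every merge whose two children already exist on the current
# level; each such merge draws a `---' bracket between its children and
# places the new community at the midpoint of their positions.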
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
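A small sketch on a hypothetical graph of two triangles, whose merge
history joins each triangle first and merges the two triangles last:
>>> g = Graph(6, [(0, 1), (1, 2), (2, 0), (3, 4), (4, 5), (5, 3)])
>>> vd = VertexDendrogram(g, [(0, 1), (3, 4), (2, 6), (5, 7), (8, 9)])
>>> vd.as_clustering(2).membership
[0, 0, 0, 1, 1, 1]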
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
(see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly; just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
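# Every group whose parent is not None contributes one (parent, child) edge;
# the root block (parent None) has no incoming edge, so the result is a tree
# whose edges point from larger blocks to the blocks nested inside them.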
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
# tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
Note that the projection distance is asymmetric, that's why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The result
is presented as a pair because a zero value for either element implies that
one of the partitions is a subpartition of the other (and a value close to
zero means it is close to being a subpartition). A small worked sketch of
the projection distance follows this function.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
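if __name__ == "__main__":
    # Hedged illustration, not part of the library API: compute the projection
    # distance described in the split_join_distance() docstring by hand for two
    # toy membership vectors. split_join_distance(memb1, memb2) should return
    # the same pair, and compare_communities(memb1, memb2, method="split-join")
    # their sum (both need the compiled igraph core).
    def _projection_distance(memb_a, memb_b):
        clusters_a, clusters_b = {}, {}
        for idx, (ca, cb) in enumerate(zip(memb_a, memb_b)):
            clusters_a.setdefault(ca, set()).add(idx)
            clusters_b.setdefault(cb, set()).add(idx)
        # Steps 1-2: for every set in A take the size of its maximal overlap with B.
        overlap_sum = sum(max(len(set_a & set_b) for set_b in clusters_b.values())
                          for set_a in clusters_a.values())
        # Step 3: subtract the sum from n, the number of elements.
        return len(memb_a) - overlap_sum

    memb1 = [0, 0, 0, 1, 1, 1]
    memb2 = [0, 0, 1, 1, 2, 2]
    d12 = _projection_distance(memb1, memb2)
    d21 = _projection_distance(memb2, memb1)
    print("projection distances: %d and %d, split-join distance: %d" % (d12, d21, d12 + d21))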
|
minute_timedeltas_wrt_first
|
Converts a list of datetime objects into a list of minute time deltas with respect to the first item.
For example, given the input datetime_lst:
[
'2019-08-22 14:32',
'2019-08-22 14:38',
'2019-08-22 14:42',
'2019-08-22 14:52',
'2019-08-22 14:57'
],
the result would be:
[32, 38, 42, 52, 57]
:param datetime_lst: list of datetime objects
:return: minute time deltas with respect to the first item of datetime_lst
|
from datetime import datetime, timedelta
from typing import Callable
def create_batch_list(n_batches: int) -> list:
return list(map(lambda x: x, range(1, n_batches + 1)))
def create_batch_dictionary(batch_lst: list, duration_lst: list, expected_finish_lst: list) -> dict:
batch_dict = {batch_lst[i]: (duration_lst[i], expected_finish_lst[i]) for i in range(0, len(batch_lst))}
return batch_dict
def create_result_batch_dictionary(batch_lst: list, start_datetime_lst: list, delta_time_lst: list) -> dict:
batch_dict = {batch_lst[i]: (start_datetime_lst[i], delta_time_lst[i]) for i in range(0, len(batch_lst))}
return batch_dict
def create_dynamic_ordering_constraint(index: int) -> str:
"""
Creates a valid AMPL constraint of the form:
[LaTeX]: $start\_time_{j+1} \geq start\_time_j + duration_j$, $\forall j \in BATCH$
:param index: j index where the current constraint should start
:return: single AMPL JIT constraint as a string
"""
i = str(index)
i_next = str(index + 1)
constraint_name = f'ordering_{i_next}_{i}'
return f's.t. {constraint_name}: start_time[{i_next}] >= start_time[{i}] + duration[{i}];'
def create_multiple_ordering_constraints(start_index: int, last_index: int) -> str:
constraints = ''
for i in range(start_index, last_index):
constraints += f'{create_dynamic_ordering_constraint(i)}\n'
return constraints
def create_multiple_constraints(start_index: int, last_index: int, create_constraints: Callable[[int, int], str]):
return create_constraints(start_index, last_index)
def dict_to_list(obj: dict) -> list:
"""
Converts a dictionary to a list, extracting the values of the dictionary.
The list is sorted in ascending order of the dict's keys.
The given dictionary should always have the same numeric keys as the result of create_batch_dictionary().
:param obj: the dictionary to convert which should have numeric keys
:return: the list of values in the dictionary
"""
return list(obj.values())
def strings_to_datetimes(str_date_lst: list, datetime_format: str) -> list:
"""
Converts a list of strings into a list of datetime objects
:param str_date_lst: list of string objects compatible with the ISO8601 format
:param datetime_format: format of the datetime
:return: list of datetime objects equivalent to the given str_date_lst
"""
return [datetime.strptime(d, datetime_format) for d in str_date_lst]
def minute_timedelta(first: datetime, second: datetime) -> int:
"""
Returns the difference expressed in minutes between 2 datetime objects
:param first: datetime object that comes before second
:param second: datetime object that comes after first
:return: difference in minutes between second and first
"""
delta: timedelta = second - first
# Cast to int so the return value matches the annotated return type.
return int(divmod(delta.total_seconds(), 60)[0])
# MASKED: minute_timedeltas_wrt_first function (lines 75-96)
def set_minutes_to_datetimes(datetime_lst: list, minutes_lst: list) -> list:
"""
Given a list of datetime objects and a list of minute values, sets each minute value on the datetime
object at the same list index. The two lists must have the same size.
:param datetime_lst: list of datetime objects
:param minutes_lst: list of minutes to set to a list of datetime objects
:return: list of datetime objects similar to datetime_lst but shifted according to minutes_lst
"""
return [d.replace(minute=0) + timedelta(minutes=m) for d, m in zip(datetime_lst, minutes_lst)]
def datetimes_to_strings(datetime_lst: list, datetime_format: str) -> list:
"""
Converts a list of datetime objects to strings, according to a certain datetime format.
:param datetime_lst: list of datetime objects to convert to string
:param datetime_format: format of the datetime
:return: the list of datetime objects converted to strings in the given datetime format
"""
return [d.strftime(datetime_format) for d in datetime_lst]
|
def minute_timedeltas_wrt_first(datetime_lst: list) -> list:
"""
Converts a list of datetime objects into a list of minute time deltas with respect to the first item.
For example, given the input datetime_lst:
[
'2019-08-22 14:32',
'2019-08-22 14:38',
'2019-08-22 14:42',
'2019-08-22 14:52',
'2019-08-22 14:57'
],
the result would be:
[32, 38, 42, 52, 57]
:param datetime_lst: list of datetime objects
:return: minute time deltas with respect to the first item of datetime_lst
"""
first_datetime: datetime = datetime_lst[0]
partial_deltas = [minute_timedelta(first=first_datetime, second=v) for v in datetime_lst[1:]]
first_minutes = first_datetime.minute
return [first_minutes] + list(map(lambda x: x + first_minutes, partial_deltas))
| 75 | 96 |
from datetime import datetime, timedelta
from typing import Callable
def create_batch_list(n_batches: int) -> list:
return list(map(lambda x: x, range(1, n_batches + 1)))
def create_batch_dictionary(batch_lst: list, duration_lst: list, expected_finish_lst: list) -> dict:
batch_dict = {batch_lst[i]: (duration_lst[i], expected_finish_lst[i]) for i in range(0, len(batch_lst))}
return batch_dict
def create_result_batch_dictionary(batch_lst: list, start_datetime_lst: list, delta_time_lst: list) -> dict:
batch_dict = {batch_lst[i]: (start_datetime_lst[i], delta_time_lst[i]) for i in range(0, len(batch_lst))}
return batch_dict
def create_dynamic_ordering_constraint(index: int) -> str:
"""
Creates a valid AMPL constraint of the form:
[LaTeX]: $start\_time_{j+1} \geq start\_time_j + duration_j$, $\forall j \in BATCH$
:param index: j index where the current constraint should start
:return: single AMPL JIT constraint as a string
"""
i = str(index)
i_next = str(index + 1)
constraint_name = f'ordering_{i_next}_{i}'
return f's.t. {constraint_name}: start_time[{i_next}] >= start_time[{i}] + duration[{i}];'
def create_multiple_ordering_constraints(start_index: int, last_index: int) -> str:
constraints = ''
for i in range(start_index, last_index):
constraints += f'{create_dynamic_ordering_constraint(i)}\n'
return constraints
def create_multiple_constraints(start_index: int, last_index: int, create_constraints: Callable[[int, int], str]):
return create_constraints(start_index, last_index)
def dict_to_list(obj: dict) -> list:
"""
Converts a dictionary to a list, extracting the values of the dictionary.
The list is sorted in ascending order of the dict's keys.
The given dictionary should always have the same numeric keys as the result of create_batch_dictionary().
:param obj: the dictionary to convert which should have numeric keys
:return: the list of values in the dictionary
"""
return list(obj.values())
def strings_to_datetimes(str_date_lst: list, datetime_format: str) -> list:
"""
Converts a list of strings into a list of datetime objects
:param str_date_lst: list of string objects compatible with the ISO8601 format
:param datetime_format: format of the datetime
:return: list of datetime objects equivalent to the given str_date_lst
"""
return [datetime.strptime(d, datetime_format) for d in str_date_lst]
def minute_timedelta(first: datetime, second: datetime) -> int:
"""
Returns the difference expressed in minutes between 2 datetime objects
:param first: datetime object that comes before second
:param second: datetime object that comes after first
:return: difference in minutes between second and first
"""
delta: timedelta = second - first
# Cast to int so the return value matches the annotated return type.
return int(divmod(delta.total_seconds(), 60)[0])
def minute_timedeltas_wrt_first(datetime_lst: list) -> list:
"""
Converts a list of datetime objects into a list of minute time deltas with respect to the first item.
For example, given the input datetime_lst:
[
'2019-08-22 14:32',
'2019-08-22 14:38',
'2019-08-22 14:42',
'2019-08-22 14:52',
'2019-08-22 14:57'
],
the result would be:
[32, 38, 42, 52, 57]
:param datetime_lst: list of datetime objects
:return: minute time deltas with respect to the first item of datetime_lst
"""
first_datetime: datetime = datetime_lst[0]
partial_deltas = [minute_timedelta(first=first_datetime, second=v) for v in datetime_lst[1:]]
first_minutes = first_datetime.minute
return [first_minutes] + list(map(lambda x: x + first_minutes, partial_deltas))
def set_minutes_to_datetimes(datetime_lst: list, minutes_lst: list) -> list:
"""
Given a list of datetime objects and a list of minute values, sets each minute value on the datetime
object at the same list index. The two lists must have the same size.
:param datetime_lst: list of datetime objects
:param minutes_lst: list of minutes to set to a list of datetime objects
:return: list of datetime objects similar to datetime_lst but shifted according to minutes_lst
"""
return [d.replace(minute=0) + timedelta(minutes=m) for d, m in zip(datetime_lst, minutes_lst)]
def datetimes_to_strings(datetime_lst: list, datetime_format: str) -> list:
"""
Converts a list of datetime objects to strings, according to a certain datetime format.
:param datetime_lst: list of datetime objects to convert to string
:param datetime_format: format of the datetime
:return: the list of datetime objects converted to strings in the given datetime format
"""
return [d.strftime(datetime_format) for d in datetime_lst]
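if __name__ == "__main__":
    # Hedged usage sketch; the format string below is an assumption for the
    # example, not something defined by this module.
    fmt = "%Y-%m-%d %H:%M"
    stamps = ['2019-08-22 14:32', '2019-08-22 14:38', '2019-08-22 14:42',
              '2019-08-22 14:52', '2019-08-22 14:57']
    datetimes = strings_to_datetimes(stamps, fmt)
    # Minute offsets relative to the first timestamp: 32, 38, 42, 52, 57
    print(minute_timedeltas_wrt_first(datetimes))
    # Two AMPL ordering constraints generated just-in-time as plain text
    print(create_multiple_ordering_constraints(1, 3))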
|
_build_top_model
|
Build Top Model
Build the fully connected layers of the network.
Parameters
----------
input_shape : tuple
Input data shape
dense_output : tuple, optional
Size of dense output layers, default is (256, 1024)
dropout : float, optional
Dropout rate, default is 0.1
Returns
-------
keras.model
Fully connected top model
|
# -*- coding: utf-8 -*-
""" NETWORK
This module defines the BlendHunter class which can be used to retrain the
network or use predefined weights to make predictions on unseen data.
:Author: Samuel Farrens <[email protected]>
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from time import time
from cv2 import imread
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, Input
from keras.applications import VGG16
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping
from keras.callbacks import ReduceLROnPlateau
class BlendHunter(object):
""" BlendHunter
Class for identifying blended galaxy images in postage stamps.
Parameters
----------
image_shape : tuple, optional
Expected shape of input images
classes : tuple, optional
List of classes, default is ('blended', 'not_blended')
weights_path : str, optional
Path to weights, default is './weights'
top_model_file : str, optional
File name for top model weights, default is 'top_model_weights'
final_model_file : str, optional
File name of the final model weights, default is
'final_model_weights'
"""
def __init__(self, image_shape=None, classes=('blended', 'not_blended'),
weights_path='./weights', top_model_file='top_model_weights',
final_model_file='final_model_weights', verbose=0):
self._image_shape = image_shape
self._classes = classes
self._weights_path = weights_path
self._top_model_file = self._format(weights_path, top_model_file)
self._final_model_file = self._format(weights_path, final_model_file)
self._verbose = verbose
self.history = None
@staticmethod
def _format(path, name):
""" Format
Add path to name.
Parameters
----------
path : str
Base path
name : str
Path extension
Returns
-------
str
Formatted path
"""
return '{}/{}'.format(path, name)
def getkwarg(self, key, default=None):
""" Get keyword agrument
Get value from keyword agruments if it exists otherwise return default.
Parameters
----------
key : str
Dictionary key
default : optional
Default value
"""
return self._kwargs[key] if key in self._kwargs else default
@staticmethod
def _get_image_shape(file):
""" Get Image Shape
Get the input image shape from an example image.
Parameters
----------
file : str
File name
Returns
-------
tuple
Image shape
"""
return imread(file).shape
def _get_target_shape(self, image_path=None):
""" Get Target Shape
Get the network target shape from the image shape.
Parameters
----------
image_path : str, optional
Path to image file
"""
if isinstance(self._image_shape, type(None)) and image_path:
file = self._format(image_path, os.listdir(image_path)[0])
self._image_shape = self._get_image_shape(file)
self._target_size = self._image_shape[:2]
def _load_generator(self, input_dir, batch_size=None,
class_mode=None, augmentation=False):
""" Load Generator
Load files from an input directory into a Keras generator.
Parameters
----------
input_dir : str
Input directory
batch_size : int, optional
Batch size
class_mode : str, optional
Generator class mode
augmentation : bool, optional
Option to apply data augmentation to the input images
Returns
-------
keras_preprocessing.image.DirectoryIterator
Keras generator
"""
if augmentation:
datagen = ImageDataGenerator(rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
else:
datagen = ImageDataGenerator(rescale=1. / 255)
generator = (datagen.flow_from_directory(input_dir,
target_size=self._target_size,
batch_size=batch_size, class_mode=class_mode,
shuffle=False))
generator.steps = generator.n // generator.batch_size
return generator
def _get_feature(self, input_dir):
""" Get Feature
Get network feature and labels from VGG16 model.
Parameters
----------
input_dir : str
Input directory
Returns
-------
tuple
VGG16 bottleneck feature, class labels
"""
generator = self._load_generator(input_dir,
batch_size=self._batch_size_top)
labels = generator.classes[:generator.steps * self._batch_size_top]
return (self._vgg16_model.predict_generator(generator,
generator.steps), labels)
@staticmethod
def _save_data(data, data_type, file_path):
""" Save Data
Save data to file.
Parameters
----------
data : np.ndarray
Output data
data_type : str
Type of feature to be saved
file_path : str
File path
"""
file_name = '{}_{}.npy'.format(file_path, data_type)
np.save(file_name, data)
@staticmethod
def _load_data(data_type, file_path):
""" Load Data
Load data from file.
Parameters
----------
data_type : str
Type of feature to be loaded
file_path : str
File path
"""
file_name = '{}_{}.npy'.format(file_path, data_type)
if os.path.isfile(file_name):
return np.load(file_name)
else:
raise IOError('{} not found'.format(file_name))
@staticmethod
def _build_vgg16_model(input_shape=None):
""" Build VGG16 Model
Build VGG16 CNN model using imagenet weights.
Parameters
----------
input_shape : tuple, optional
Input data shape
Returns
-------
VGG16 model
"""
return VGG16(include_top=False, weights='imagenet',
input_shape=input_shape)
def _get_features(self):
""" Get Features
Get the network (bottleneck) features from the VGG16 model.
"""
self._vgg16_model = self._build_vgg16_model()
for key, value in self._features.items():
bot_feat, labels = self._get_feature(value['dir'])
if self._save_bottleneck:
self._save_data(bot_feat, key, self._bottleneck_file)
if self._save_labels:
self._save_data(labels, key, self._labels_file)
value['bottleneck'] = bot_feat
value['labels'] = labels
def _load_features(self):
""" Load Bottleneck Features
Load VGG16 bottleneck features.
"""
for feature_name in ('bottleneck', 'labels'):
if feature_name == 'bottleneck':
out_path = self._bottleneck_file
else:
out_path = self._labels_file
for key, value in self._features.items():
if feature_name not in value:
value[feature_name] = self._load_data(key, out_path)
# MASKED: _build_top_model function (lines 303-332)
def _train_top_model(self):
""" Train Top Model
Train fully connected top model of the network.
"""
self._load_features()
model = (self._build_top_model(
input_shape=self._features['train']['bottleneck'].shape[1:]))
model.compile(optimizer=self.getkwarg('top_opt', 'adam'),
loss=self.getkwarg('top_loss', 'binary_crossentropy'),
metrics=self.getkwarg('top_metrics', ['accuracy']))
top_model_file = '{}.h5'.format(self._top_model_file)
callbacks = []
callbacks.append(ModelCheckpoint(top_model_file,
monitor='val_loss', verbose=self._verbose,
save_best_only=True, save_weights_only=True,
mode='auto', period=1))
if self.getkwarg('top_early_stop', True):
min_delta = self.getkwarg('top_min_delta', 0.001)
patience = self.getkwarg('top_patience', 10)
callbacks.append(EarlyStopping(monitor='val_loss',
min_delta=min_delta,
patience=patience,
verbose=self._verbose))
callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=5, min_delta=0.001,
cooldown=2, verbose=self._verbose))
self.history = (model.fit(self._features['train']['bottleneck'],
self._features['train']['labels'],
epochs=self._epochs_top,
batch_size=self._batch_size_top,
callbacks=callbacks,
validation_data=(self._features['valid']['bottleneck'],
self._features['valid']['labels']),
verbose=self._verbose))
model.save_weights(top_model_file)
def plot_history(self):
""" Plot History
Plot the training history metrics.
"""
sns.set(style="darkgrid")
if not isinstance(self.history, type(None)):
plt.figure(figsize=(16, 8))
plt.subplot(121)
plt.plot(self.history.history['acc'])
plt.plot(self.history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.subplot(122)
plt.plot(self.history.history['loss'])
plt.plot(self.history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.show()
else:
print('No history to display. Run training first.')
def _freeze_layers(self, model, depth):
""" Freeze Network Layers
Parameters
----------
model :
Keras model
depth : int
Depth of layers to be frozen
"""
for layer in model.layers[:depth]:
layer.trainable = False
def _build_final_model(self, load_top_weights=False,
load_final_weights=False):
""" Build Final Model
Build the final BlendHunter model.
Parameters
----------
load_top_weights : bool
Option to load the top model weights
load_final_weights : bool
Option to load the final model weights
Returns
-------
Final model
"""
vgg16_model = self._build_vgg16_model(self._image_shape)
top_model = self._build_top_model(vgg16_model.output_shape[1:],
dropout=0.4)
if load_top_weights:
top_model.load_weights('{}.h5'.format(self._top_model_file))
model = Model(inputs=vgg16_model.input,
outputs=top_model(vgg16_model.output))
if load_final_weights:
model.load_weights('{}.h5'.format(self._final_model_file))
return model
def _fine_tune(self):
""" Fine Tune
Fine tune the final model training.
"""
model = self._build_final_model(load_top_weights=True)
self._freeze_layers(model, 18)
model.compile(loss='binary_crossentropy',
optimizer=Adam(lr=0.0001),
metrics=['binary_accuracy'])
train_gen = self._load_generator(self._features['train']['dir'],
batch_size=self._batch_size_fine,
class_mode='binary',
augmentation=True)
valid_gen = self._load_generator(self._features['valid']['dir'],
batch_size=self._batch_size_fine,
class_mode='binary')
callbacks = []
callbacks.append(ModelCheckpoint('{}.h5'.format(self._fine_tune_file),
monitor='val_loss', verbose=self._verbose,
save_best_only=True, save_weights_only=True,
mode='auto', period=1))
callbacks.append(EarlyStopping(monitor='val_loss', min_delta=0.001,
patience=10, verbose=self._verbose))
callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=5, min_delta=0.001,
cooldown=2, verbose=self._verbose))
model.fit_generator(train_gen, steps_per_epoch=train_gen.steps,
epochs=self._epochs_fine,
callbacks=callbacks,
validation_data=valid_gen,
validation_steps=valid_gen.steps,
verbose=self._verbose)
self._freeze_layers(model, 19)
model.layers[17].trainable = True
model.compile(loss='binary_crossentropy',
optimizer=SGD(lr=10e-5),
metrics=['binary_accuracy'])
model.fit_generator(train_gen, steps_per_epoch=train_gen.steps,
epochs=self._epochs_fine,
callbacks=callbacks,
validation_data=valid_gen,
validation_steps=valid_gen.steps,
verbose=self._verbose)
model.save_weights('{}.h5'.format(self._final_model_file))
def train(self, input_path, get_features=True, train_top=True,
fine_tune=True, train_dir_name='train',
valid_dir_name='validation', epochs_top=500, epochs_fine=50,
batch_size_top=250, batch_size_fine=16, save_bottleneck=True,
bottleneck_file='bottleneck_features',
save_labels=True, labels_file='labels',
fine_tune_file='fine_tune_checkpoint',
top_model_file='top_model_weights', **kwargs):
""" Train
Train the BlendHunter network.
Parameters
----------
input_path : str
Path to input data
get_features : bool, optional
Option to get bottleneck features, default is True
train_top : bool, optional
Option to train top model, default is True
fine_tune : bool, optional
Option to run fine tuning component of training, default is True
train_dir_name : str, optional
Training data directory name, default is 'train'
valid_dir_name : str, optional
Validation data directory name, default is 'validation'
epochs_top : int, optional
Number of training epochs for top model, default is 500
epochs_fine : int, optional
Number of training epochs for fine tuning, default is 50
batch_size_top : int, optional
Batch size for top model, default is 250
batch_size_fine : int, optional
Batch size for fine tuning, default is 16
save_bottleneck : bool, optional
Option to save bottleneck features, default is True
bottleneck_file : str, optional
File name for bottleneck features, default is
'bottleneck_features'
fine_tune_file : str, optional
Training checkpoint for the fine tuning step, default is
'fine_tune_checkpoint'
"""
start = time()
self._epochs_top = epochs_top
self._epochs_fine = epochs_fine
self._batch_size_top = batch_size_top
self._batch_size_fine = batch_size_fine
self._save_bottleneck = save_bottleneck
self._save_labels = save_labels
self._bottleneck_file = self._format(self._weights_path,
bottleneck_file)
self._labels_file = self._format(self._weights_path, labels_file)
self._fine_tune_file = self._format(self._weights_path, fine_tune_file)
self._features = {'train': {}, 'valid': {}}
self._features['train']['dir'] = self._format(input_path,
train_dir_name)
self._features['valid']['dir'] = self._format(input_path,
valid_dir_name)
self._kwargs = kwargs
self._get_target_shape(self._format(self._features['train']['dir'],
self._classes[0]))
if get_features:
self._get_features()
if train_top:
self._train_top_model()
if fine_tune:
self._fine_tune()
end = time()
print('Duration {:0.2f}s'.format(end - start))
def predict(self, input_path=None, input_path_keras=None, input_data=None,
weights_type='fine'):
""" Predict
Predict classes for test data
Parameters
----------
input_path : str
Path to input data
input_path_keras : str
Path to input data in Keras format, i.e. path to directory one
level above where the data is stored
input_data : np.ndarray
Array of input images
weights_type : str, optional {'fine', 'top'}
Type of weights to use for prediction, default is 'fine'
Returns
-------
list
List of predicted class labels; when predicting from a directory the
corresponding file names are stored in the filenames attribute
"""
if input_path:
test_path = '/'.join(input_path.split('/')[:-1])
elif input_path_keras:
test_path = input_path_keras
else:
test_path = None
if weights_type not in ('fine', 'top'):
raise ValueError('Invalid value for weights_type. Options are '
'"fine" or "top"')
if test_path:
self._get_target_shape(self._format(test_path,
os.listdir(test_path)[0]))
if weights_type == 'fine':
model = self._build_final_model(load_final_weights=True)
elif weights_type == 'top':
model = self._build_final_model(load_top_weights=True)
test_gen = self._load_generator(test_path,
class_mode='categorical',
batch_size=1)
self.filenames = test_gen.filenames
test_gen.reset()
res = model.predict_generator(test_gen,
verbose=self._verbose,
steps=test_gen.steps).flatten()
elif not isinstance(input_data, type(None)):
self._image_shape = input_data.shape[1:]
self._get_target_shape()
model = self._build_final_model(load_final_weights=True)
res = model.predict(input_data, verbose=self._verbose).flatten()
else:
raise RuntimeError('No input data provided.')
labels = {0: self._classes[0], 1: self._classes[1]}
preds = [labels[k] for k in np.around(res)]
return preds
|
@staticmethod
def _build_top_model(input_shape, dense_output=(256, 1024), dropout=0.1):
""" Build Top Model
Build the fully connected layers of the network.
Parameters
----------
input_shape : tuple
Input data shape
dense_output : tuple, optional
Size of dense output layers, default is (256, 1024)
dropout : float, optional
Dropout rate, default is 0.1
Returns
-------
keras.model
Fully connected top model
"""
model = Sequential()
model.add(Flatten(input_shape=input_shape))
model.add(Dense(dense_output[0]))
model.add(Dropout(dropout))
model.add(Dense(dense_output[1], activation='relu'))
model.add(Dense(1, activation='sigmoid'))
return model
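# For illustration only: with a VGG16 bottleneck output of shape (7, 7, 512)
# (the case for 224x224 input images) the defaults above yield
# Flatten -> Dense(256) -> Dropout(0.1) -> Dense(1024, relu) -> Dense(1, sigmoid).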
| 303 | 332 |
# -*- coding: utf-8 -*-
""" NETWORK
This module defines the BlendHunter class which can be used to retrain the
network or use predefined weights to make predictions on unseen data.
:Author: Samuel Farrens <[email protected]>
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from time import time
from cv2 import imread
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, Input
from keras.applications import VGG16
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping
from keras.callbacks import ReduceLROnPlateau
class BlendHunter(object):
""" BlendHunter
Class for identifying blended galaxy images in postage stamps.
Parameters
----------
image_shape : tuple, optional
Expected shape of input images
classes : tuple, optional
List of classes, default is ('blended', 'not_blended')
weights_path : str, optional
Path to weights, default is './weights'
top_model_file : str, optional
File name for top model weights, default is 'top_model_weights'
final_model_file : str, optional
File name of the final model weights, default is
'final_model_weights'
"""
def __init__(self, image_shape=None, classes=('blended', 'not_blended'),
weights_path='./weights', top_model_file='top_model_weights',
final_model_file='final_model_weights', verbose=0):
self._image_shape = image_shape
self._classes = classes
self._weights_path = weights_path
self._top_model_file = self._format(weights_path, top_model_file)
self._final_model_file = self._format(weights_path, final_model_file)
self._verbose = verbose
self.history = None
@staticmethod
def _format(path, name):
""" Format
Add path to name.
Parameters
----------
path : str
Base path
name : str
Path extension
Returns
-------
str
Formatted path
"""
return '{}/{}'.format(path, name)
def getkwarg(self, key, default=None):
""" Get keyword agrument
Get value from keyword agruments if it exists otherwise return default.
Parameters
----------
key : str
Dictionary key
default : optional
Default value
"""
return self._kwargs[key] if key in self._kwargs else default
@staticmethod
def _get_image_shape(file):
""" Get Image Shape
Get the input image shape from an example image.
Parameters
----------
file : str
File name
Returns
-------
tuple
Image shape
"""
return imread(file).shape
def _get_target_shape(self, image_path=None):
""" Get Target Shape
Get the network target shape from the image shape.
Parameters
----------
image_path : str, optional
Path to image file
"""
if isinstance(self._image_shape, type(None)) and image_path:
file = self._format(image_path, os.listdir(image_path)[0])
self._image_shape = self._get_image_shape(file)
self._target_size = self._image_shape[:2]
def _load_generator(self, input_dir, batch_size=None,
class_mode=None, augmentation=False):
""" Load Generator
Load files from an input directory into a Keras generator.
Parameters
----------
input_dir : str
Input directory
batch_size : int, optional
Batch size
class_mode : str, optional
Generator class mode
augmentation : bool, optional
Option to apply data augmentation to the input images
Returns
-------
keras_preprocessing.image.DirectoryIterator
Keras generator
"""
if augmentation:
datagen = ImageDataGenerator(rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
else:
datagen = ImageDataGenerator(rescale=1. / 255)
generator = (datagen.flow_from_directory(input_dir,
target_size=self._target_size,
batch_size=batch_size, class_mode=class_mode,
shuffle=False))
generator.steps = generator.n // generator.batch_size
return generator
def _get_feature(self, input_dir):
""" Get Feature
Get network feature and labels from VGG16 model.
Parameters
----------
input_dir : str
Input directory
Returns
-------
tuple
VGG16 bottleneck feature, class labels
"""
generator = self._load_generator(input_dir,
batch_size=self._batch_size_top)
labels = generator.classes[:generator.steps * self._batch_size_top]
return (self._vgg16_model.predict_generator(generator,
generator.steps), labels)
@staticmethod
def _save_data(data, data_type, file_path):
""" Save Data
Save data to file.
Parameters
----------
data : np.ndarray
Output data
data_type : str
Type of feature to be saved
file_path : str
File path
"""
file_name = '{}_{}.npy'.format(file_path, data_type)
np.save(file_name, data)
@staticmethod
def _load_data(data_type, file_path):
""" Load Data
Load data from file.
Parameters
----------
data_type : str
Type of feature to be loaded
file_path : str
File path
"""
file_name = '{}_{}.npy'.format(file_path, data_type)
if os.path.isfile(file_name):
return np.load(file_name)
else:
raise IOError('{} not found'.format(file_name))
@staticmethod
def _build_vgg16_model(input_shape=None):
""" Build VGG16 Model
Build VGG16 CNN model using imagenet weights.
Parameters
----------
input_shape : tuple, optional
Input data shape
Returns
-------
VGG16 model
"""
return VGG16(include_top=False, weights='imagenet',
input_shape=input_shape)
def _get_features(self):
""" Get Features
Get the network (bottleneck) features from the VGG16 model.
"""
self._vgg16_model = self._build_vgg16_model()
for key, value in self._features.items():
bot_feat, labels = self._get_feature(value['dir'])
if self._save_bottleneck:
self._save_data(bot_feat, key, self._bottleneck_file)
if self._save_labels:
self._save_data(labels, key, self._labels_file)
value['bottleneck'] = bot_feat
value['labels'] = labels
def _load_features(self):
""" Load Bottleneck Features
Load VGG16 bottleneck features.
"""
for feature_name in ('bottleneck', 'labels'):
if feature_name == 'bottleneck':
out_path = self._bottleneck_file
else:
out_path = self._labels_file
for key, value in self._features.items():
if feature_name not in value:
value[feature_name] = self._load_data(key, out_path)
@staticmethod
def _build_top_model(input_shape, dense_output=(256, 1024), dropout=0.1):
""" Build Top Model
Build the fully connected layers of the network.
Parameters
----------
input_shape : tuple
Input data shape
dense_output : tuple, optional
Size of dense output layers, default is (256, 1024)
dropout : float, optional
Dropout rate, default is 0.1
Returns
-------
keras.model
Fully connected top model
"""
model = Sequential()
model.add(Flatten(input_shape=input_shape))
model.add(Dense(dense_output[0]))
model.add(Dropout(dropout))
model.add(Dense(dense_output[1], activation='relu'))
model.add(Dense(1, activation='sigmoid'))
return model
def _train_top_model(self):
""" Train Top Model
Train fully connected top model of the network.
"""
self._load_features()
model = (self._build_top_model(
input_shape=self._features['train']['bottleneck'].shape[1:]))
model.compile(optimizer=self.getkwarg('top_opt', 'adam'),
loss=self.getkwarg('top_loss', 'binary_crossentropy'),
metrics=self.getkwarg('top_metrics', ['accuracy']))
top_model_file = '{}.h5'.format(self._top_model_file)
callbacks = []
callbacks.append(ModelCheckpoint(top_model_file,
monitor='val_loss', verbose=self._verbose,
save_best_only=True, save_weights_only=True,
mode='auto', period=1))
if self.getkwarg('top_early_stop', True):
min_delta = self.getkwarg('top_min_delta', 0.001)
patience = self.getkwarg('top_patience', 10)
callbacks.append(EarlyStopping(monitor='val_loss',
min_delta=min_delta,
patience=patience,
verbose=self._verbose))
callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=5, min_delta=0.001,
cooldown=2, verbose=self._verbose))
self.history = (model.fit(self._features['train']['bottleneck'],
self._features['train']['labels'],
epochs=self._epochs_top,
batch_size=self._batch_size_top,
callbacks=callbacks,
validation_data=(self._features['valid']['bottleneck'],
self._features['valid']['labels']),
verbose=self._verbose))
model.save_weights(top_model_file)
def plot_history(self):
""" Plot History
Plot the training history metrics.
"""
sns.set(style="darkgrid")
if not isinstance(self.history, type(None)):
plt.figure(figsize=(16, 8))
plt.subplot(121)
plt.plot(self.history.history['acc'])
plt.plot(self.history.history['val_acc'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.subplot(122)
plt.plot(self.history.history['loss'])
plt.plot(self.history.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['train', 'valid'], loc='upper left')
plt.show()
else:
print('No history to display. Run training first.')
def _freeze_layers(self, model, depth):
""" Freeze Network Layers
Parameters
----------
model :
Keras model
depth : int
Depth of layers to be frozen
"""
for layer in model.layers[:depth]:
layer.trainable = False
def _build_final_model(self, load_top_weights=False,
load_final_weights=False):
""" Build Final Model
Build the final BlendHunter model.
Parameters
----------
load_top_weights : bool
Option to load the top model weights
load_final_weights : bool
Option to load the final model weights
Returns
-------
Final model
"""
vgg16_model = self._build_vgg16_model(self._image_shape)
top_model = self._build_top_model(vgg16_model.output_shape[1:],
dropout=0.4)
if load_top_weights:
top_model.load_weights('{}.h5'.format(self._top_model_file))
model = Model(inputs=vgg16_model.input,
outputs=top_model(vgg16_model.output))
if load_final_weights:
model.load_weights('{}.h5'.format(self._final_model_file))
return model
def _fine_tune(self):
""" Fine Tune
Fine tune the final model training.
"""
model = self._build_final_model(load_top_weights=True)
self._freeze_layers(model, 18)
model.compile(loss='binary_crossentropy',
optimizer=Adam(lr=0.0001),
metrics=['binary_accuracy'])
train_gen = self._load_generator(self._features['train']['dir'],
batch_size=self._batch_size_fine,
class_mode='binary',
augmentation=True)
valid_gen = self._load_generator(self._features['valid']['dir'],
batch_size=self._batch_size_fine,
class_mode='binary')
callbacks = []
callbacks.append(ModelCheckpoint('{}.h5'.format(self._fine_tune_file),
monitor='val_loss', verbose=self._verbose,
save_best_only=True, save_weights_only=True,
mode='auto', period=1))
callbacks.append(EarlyStopping(monitor='val_loss', min_delta=0.001,
patience=10, verbose=self._verbose))
callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=5, min_delta=0.001,
cooldown=2, verbose=self._verbose))
model.fit_generator(train_gen, steps_per_epoch=train_gen.steps,
epochs=self._epochs_fine,
callbacks=callbacks,
validation_data=valid_gen,
validation_steps=valid_gen.steps,
verbose=self._verbose)
self._freeze_layers(model, 19)
model.layers[17].trainable = True
model.compile(loss='binary_crossentropy',
optimizer=SGD(lr=10e-5),
metrics=['binary_accuracy'])
model.fit_generator(train_gen, steps_per_epoch=train_gen.steps,
epochs=self._epochs_fine,
callbacks=callbacks,
validation_data=valid_gen,
validation_steps=valid_gen.steps,
verbose=self._verbose)
model.save_weights('{}.h5'.format(self._final_model_file))
def train(self, input_path, get_features=True, train_top=True,
fine_tune=True, train_dir_name='train',
valid_dir_name='validation', epochs_top=500, epochs_fine=50,
batch_size_top=250, batch_size_fine=16, save_bottleneck=True,
bottleneck_file='bottleneck_features',
save_labels=True, labels_file='labels',
fine_tune_file='fine_tune_checkpoint',
top_model_file='top_model_weights', **kwargs):
""" Train
Train the BlendHunter network.
Parameters
----------
input_path : str
Path to input data
get_features : bool, optional
Option to get bottleneck features, default is True
train_top : bool, optional
Option to train top model, default is True
fine_tune : bool, optional
Option to run fine tuning component of training, default is True
train_dir_name : str, optional
Training data directory name, default is 'train'
valid_dir_name : str, optional
Validation data directory name, default is 'validation'
epochs_top : int, optional
Number of training epochs for top model, default is 500
epochs_fine : int, optional
Number of training epochs for fine tuning, default is 50
batch_size_top : int, optional
Batch size for top model, default is 250
batch_size_fine : int, optional
Batch size for fine tuning, default is 16
save_bottleneck : bool, optional
Option to save bottleneck features, default is True
bottleneck_file : str, optional
File name for bottleneck features, default is
'bottleneck_features'
fine_tune_file : str, optional
Training checkpoint for the fine tuning step, default is
'fine_tune_checkpoint'
"""
start = time()
self._epochs_top = epochs_top
self._epochs_fine = epochs_fine
self._batch_size_top = batch_size_top
self._batch_size_fine = batch_size_fine
self._save_bottleneck = save_bottleneck
self._save_labels = save_labels
self._bottleneck_file = self._format(self._weights_path,
bottleneck_file)
self._labels_file = self._format(self._weights_path, labels_file)
self._fine_tune_file = self._format(self._weights_path, fine_tune_file)
self._features = {'train': {}, 'valid': {}}
self._features['train']['dir'] = self._format(input_path,
train_dir_name)
self._features['valid']['dir'] = self._format(input_path,
valid_dir_name)
self._kwargs = kwargs
self._get_target_shape(self._format(self._features['train']['dir'],
self._classes[0]))
if get_features:
self._get_features()
if train_top:
self._train_top_model()
if fine_tune:
self._fine_tune()
end = time()
print('Duration {:0.2f}s'.format(end - start))
def predict(self, input_path=None, input_path_keras=None, input_data=None,
weights_type='fine'):
""" Predict
Predict classes for test data
Parameters
----------
input_path : str
Path to input data
input_path_keras : str
Path to input data in Keras format, i.e. path to directory one
level above where the data is stored
input_data : np.ndarray
Array of input images
weights_type : str, optional {'fine', 'top'}
Type of weights to use for prediction, default is 'fine'
Returns
-------
list
List of predicted class labels; when predicting from a directory the
corresponding file names are stored in the filenames attribute
"""
if input_path:
test_path = '/'.join(input_path.split('/')[:-1])
elif input_path_keras:
test_path = input_path_keras
else:
test_path = None
if weights_type not in ('fine', 'top'):
raise ValueError('Invalid value for weights_type. Options are '
'"fine" or "top"')
if test_path:
self._get_target_shape(self._format(test_path,
os.listdir(test_path)[0]))
if weights_type == 'fine':
model = self._build_final_model(load_final_weights=True)
elif weights_type == 'top':
model = self._build_final_model(load_top_weights=True)
test_gen = self._load_generator(test_path,
class_mode='categorical',
batch_size=1)
self.filenames = test_gen.filenames
test_gen.reset()
res = model.predict_generator(test_gen,
verbose=self._verbose,
steps=test_gen.steps).flatten()
elif not isinstance(input_data, type(None)):
self._image_shape = input_data.shape[1:]
self._get_target_shape()
model = self._build_final_model(load_final_weights=True)
res = model.predict(input_data, verbose=self._verbose).flatten()
else:
raise RuntimeError('No input data provided.')
labels = {0: self._classes[0], 1: self._classes[1]}
preds = [labels[k] for k in np.around(res)]
return preds
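if __name__ == '__main__':
    # Hedged usage sketch; the directory layout below ('./data/train',
    # './data/validation' and a test folder) is an assumption for illustration,
    # not something shipped with the package.
    bh = BlendHunter(weights_path='./weights', verbose=1)
    bh.train('./data', epochs_top=10, epochs_fine=2)
    predictions = bh.predict(input_path='./data/test/blended', weights_type='fine')
    print(predictions[:10])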
|
__init_subclass__
|
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
|
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit from.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
# MASKED: __init_subclass__ function (lines 80-90)
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the metadata values as a dict, so infer the timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
# Add the unit to the units dictionary if the column is not already present.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
# Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
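    # Hedged usage sketch (values illustrative): a TimeSeries can be truncated
    # with a TimeRange, with a pair of time strings, or by integer slicing.
    #
    #     >>> ts.truncate(TimeRange('2019-08-22 14:00', '2019-08-22 15:00'))  # doctest: +SKIP
    #     >>> ts.truncate('2019-08-22 14:00', '2019-08-22 15:00')             # doctest: +SKIP
    #     >>> ts.truncate(0, 100, 2)  # every second row of the first hundred  # doctest: +SKIP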
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
# Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool` Optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
# Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
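# -- Editorial usage sketch for concatenate(), not part of the original sunpy source.
# Two toy series over adjacent intervals; dates and the column name are assumed.
import numpy as np
import pandas as pd
_first = GenericTimeSeries(pd.DataFrame({'flux': np.ones(5)}, index=pd.date_range('2012-06-01 00:00', periods=5, freq='min')), units=OrderedDict([('flux', u.W)]))
_second = GenericTimeSeries(pd.DataFrame({'flux': np.zeros(5)}, index=pd.date_range('2012-06-01 00:05', periods=5, freq='min')), units=OrderedDict([('flux', u.W)]))
_joined = _first.concatenate(_second)  # ten rows; metadata and units dictionaries merged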
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
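# -- Editorial usage sketch for plot() and peek(), not part of the original sunpy source.
# peek() opens its own figure; plot() draws onto an existing matplotlib Axes.
import numpy as np
import pandas as pd
_ts = GenericTimeSeries(pd.DataFrame({'intensity': np.sin(np.linspace(0, 4 * np.pi, 100))}, index=pd.date_range('2012-06-01', periods=100, freq='min')), units=OrderedDict([('intensity', u.dimensionless_unscaled)]))
_fig, _ax = plt.subplots()
_ts.plot(axes=_ax, color='k')  # extra keyword arguments are forwarded to DataFrame.plot
# _ts.peek()  # would open a new figure window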
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
To be added into all the peek methods in all source sub-classes.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
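# -- Editorial sketch of what _validate_units() accepts, not part of the original source.
# Values must be astropy unit objects; anything else makes the dict invalid and emits a
# SunpyUserWarning. `ts` below stands for any GenericTimeSeries instance.
_good_units = OrderedDict([('xrsa', u.W / u.m**2)])  # ts._validate_units(_good_units) -> True
_bad_units = OrderedDict([('xrsa', 'W/m2')])         # ts._validate_units(_bad_units) -> False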
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
# Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
# Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
Return an Astropy Table of the given TimeSeries object.
Returns
-------
newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
return self.data[columns].values
else:
return self.data.values
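# -- Editorial usage sketch for the export methods, not part of the original sunpy source.
# Column name, dates and unit are assumed for illustration.
import numpy as np
import pandas as pd
_ts = GenericTimeSeries(pd.DataFrame({'intensity': np.arange(5.0)}, index=pd.date_range('2012-06-01', periods=5, freq='min')), units=OrderedDict([('intensity', u.W)]))
_tbl = _ts.to_table()       # astropy Table with a leading 'date' column and units attached
_df = _ts.to_dataframe()    # the underlying pandas DataFrame
_arr = _ts.to_array()       # plain numpy array of the column values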
def __eq__(self, other):
"""
Check whether two TimeSeries objects are the same, i.e. they have matching
type, data, metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
Check whether two TimeSeries objects are not the same, i.e. they do not have
matching type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
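# -- Editorial sketch of the registration hook in use, not part of the original source.
# The subclass name and the is_datasource_for signature are assumed for illustration;
# the hook itself only requires that the attribute exists on the subclass.
class ExampleTimeSeries(GenericTimeSeries):
    @classmethod
    def is_datasource_for(cls, **kwargs):
        # Hypothetical match rule, e.g. keyed on an instrument name in the metadata.
        return kwargs.get('meta', {}).get('instrume') == 'EXAMPLE'
# Defining the subclass runs __init_subclass__, so GenericTimeSeries._registry now maps
# ExampleTimeSeries to its is_datasource_for method and the factory can dispatch to it.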
| 80 | 90 |
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
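# -- Editorial usage sketch for quantity(), not part of the original sunpy source.
# Column name and unit are assumed for illustration; module-level imports are reused.
import numpy as np
import pandas as pd
_ts = GenericTimeSeries(pd.DataFrame({'xrsb': np.arange(4.0)}, index=pd.date_range('2012-06-01', periods=4, freq='min')), units=OrderedDict([('xrsb', u.W / u.m**2)]))
_flux = _ts.quantity('xrsb')  # Quantity([0., 1., 2., 3.], unit W / m2)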
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
# Add the unit to the units dictionary if not already there.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
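# -- Editorial usage sketch for add_column(), not part of the original sunpy source.
# Shows adding a derived column as an astropy Quantity; names are illustrative only.
import numpy as np
import pandas as pd
_ts = GenericTimeSeries(pd.DataFrame({'xrsb': np.arange(4.0)}, index=pd.date_range('2012-06-01', periods=4, freq='min')), units=OrderedDict([('xrsb', u.W / u.m**2)]))
_scaled = u.Quantity(2 * np.arange(4.0), u.W / u.m**2)
_new_ts = _ts.add_column('xrsb_scaled', _scaled)  # unit taken from the Quantity itself
# add_column returns a new TimeSeries; the columns of _ts are left untouched.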
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
# Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
# Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool`, optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
# Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
To be added into all the peek methods in all source sub-classes.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
# Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
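# -- Editorial sketch of _sanitize_units() behaviour, not part of the original source.
# Columns missing from the units dict are filled with dimensionless units (with a
# warning) and the dict is re-ordered to follow the data columns.
import numpy as np
import pandas as pd
_ts = GenericTimeSeries(pd.DataFrame({'a': np.arange(3.0), 'b': np.arange(3.0)}, index=pd.date_range('2012-06-01', periods=3, freq='min')), units=OrderedDict([('b', u.W)]))
_ts._sanitize_units()
# _ts.units == OrderedDict([('a', u.dimensionless_unscaled), ('b', u.W)])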
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
# Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
Return an Astropy Table of the given TimeSeries object.
Returns
-------
newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
return self.data[columns].values
else:
return self.data.values
def __eq__(self, other):
"""
Check whether two TimeSeries objects are the same, i.e. they have matching
type, data, metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
Check whether two TimeSeries objects are not the same, i.e. they do not have
matching type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
quantity
|
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
|
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
# MASKED: quantity function (lines 150-165)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
# Add the unit to the units dictionary if not already there.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
# Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
# Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool`, optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
# Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
To be added into all the peek methods in all source sub-classes.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
# Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
# Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
Return an Astropy Table of the given TimeSeries object.
Returns
-------
newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
return self.data[columns].values
else:
return self.data.values
def __eq__(self, other):
"""
Check whether two TimeSeries objects are the same, i.e. they have matching
type, data, metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
Check whether two TimeSeries objects are not the same, i.e. they do not have
matching type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
| 150 | 165 |
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
# Add the unit to the units dictionary if not already there.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
# Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
# Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool`, optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
# Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
To be added into all the peek methods in all source sub-classes.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
# Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
# Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
Return an Astropy Table of the given TimeSeries object.
Returns
-------
newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
return self.data[columns].values
else:
return self.data.values
def __eq__(self, other):
"""
Check two TimeSeries objects are the same, they have matching type, data,
metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
Check two TimeSeries objects are not the same, they don't have matching
type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
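A minimal standalone sketch (illustrative only, not part of the class above) of the unit rules that _validate_units and _sanitize_units enforce: every value in the units mapping must be an astropy UnitBase, and any data column without a declared unit falls back to dimensionless. The column names below are invented for the example.

from collections import OrderedDict
import astropy.units as u

columns = ['xrsa', 'xrsb', 'quality']             # hypothetical data columns
units = OrderedDict([('xrsa', u.W / u.m**2),      # a proper astropy unit
                     ('xrsb', 'watt')])           # a plain string, not a UnitBase

# _validate_units-style check: every value must be an astropy UnitBase.
print(all(isinstance(unit, u.UnitBase) for unit in units.values()))  # False ('watt' is a str)

# _sanitize_units-style clean-up: parse the string, fill missing columns with
# dimensionless_unscaled and re-order the mapping to match the column order.
units['xrsb'] = u.Unit(units['xrsb'])
sanitized = OrderedDict((col, units.get(col, u.dimensionless_unscaled)) for col in columns)
print(sanitized)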
|
plot
|
Plot the time series.
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
|
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit from.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
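# Illustration (hypothetical subclass, not part of SunPy): a datasource opts into the
# registry simply by defining an ``is_datasource_for`` classmethod, e.g.
#
#     class ExampleTimeSeries(GenericTimeSeries):
#         @classmethod
#         def is_datasource_for(cls, **kwargs):
#             return kwargs.get('source', '').lower() == 'example'
#
# On import, __init_subclass__ records _registry[ExampleTimeSeries] =
# ExampleTimeSeries.is_datasource_for, which the TimeSeries factory can then query.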
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
# Add the unit to the units dictionary if the column is not already present.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
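# Usage illustration for a hypothetical three-row TimeSeries ``ts`` (names invented):
#     flux = u.Quantity([1.0, 2.0, 3.0], u.W / u.m**2)
#     ts2 = ts.add_column('flux', flux)              # unit taken from the Quantity
#     ts3 = ts2.add_column('flux', flux.value * 2)   # plain array updates values only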
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
# Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
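# Usage illustration for a hypothetical ``ts`` spanning 2011-01-01; the two calls are equivalent:
#     ts_short = ts.truncate('2011-01-01 01:00', '2011-01-01 02:00')
#     ts_short = ts.truncate(TimeRange('2011-01-01 01:00', '2011-01-01 02:00'))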
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
# Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool`, optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
# Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
# MASKED: plot function (lines 367-392)
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
To be added into all the peek methods in all source subclasses.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
# Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
# Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
Return an Astropy Table of the given TimeSeries object.
Returns
-------
newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
return self.data[columns].values
else:
return self.data.values
def __eq__(self, other):
"""
Check two TimeSeries objects are the same, they have matching type, data,
metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
Check two TimeSeries objects are not the same, they don't have matching
type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
| 367 | 392 |
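A short usage sketch for the plot/peek methods above. The toy data is lifted from the example in the class docstring; only the explicit figure/axes handling is added here, exercising the documented axes argument.

import datetime
import numpy as np
import pandas as pd
import astropy.units as u
import matplotlib.pyplot as plt
from astropy.time import TimeDelta
from sunpy.time import parse_time
from sunpy.timeseries import TimeSeries

# Same toy data as the class docstring example.
base = parse_time(datetime.datetime.today())
times = base - TimeDelta(np.arange(24 * 60) * u.minute)
intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
ts = TimeSeries(pd.DataFrame(intensity, index=times, columns=['intensity']))

# plot() draws onto the supplied axes; with axes=None it falls back to plt.gca().
fig, ax = plt.subplots()
ts.plot(axes=ax)
fig.show()

# peek() validates the data first and then opens its own figure.
ts.peek()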
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
# Add the unit to the units dictionary if the column is not already present.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
# Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
# Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool`, optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
# Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
To be added into all the peek methods in all source subclasses.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
# Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
# Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
Return an Astropy Table of the given TimeSeries object.
Returns
-------
newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
return self.data[columns].values
else:
return self.data.values
def __eq__(self, other):
"""
Check two TimeSeries objects are the same, they have matching type, data,
metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
Check two TimeSeries objects are not the same, they don't have matching
type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
_validate_meta
|
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
|
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
# Add the unit to the units dictionary if the column is not already present.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
# Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
# Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool`, optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
# Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
To be added into all the peek methods in all source subclasses.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
# MASKED: _validate_meta function (lines 424-445)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
# Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
# Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
Return an Astropy Table of the given TimeSeries object.
Returns
-------
newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
return self.data[columns].values
else:
return self.data.values
def __eq__(self, other):
"""
Check two TimeSeries objects are the same, they have matching type, data,
metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
Check two TimeSeries objects are not the same, they don't have matching
type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
| 424 | 445 |
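A standalone sketch of the check performed by the _validate_meta implementation above: unit-like metadata strings are parsed with parse_strict='silent' and flagged when astropy reports their physical type as 'unknown'. The metadata dict is invented for the example.

import warnings
import astropy.units as u

meta = {'cunit1': 'arcsec', 'waveunit': 'notaunit'}   # example values only

for meta_property in ('cunit1', 'cunit2', 'waveunit'):
    value = meta.get(meta_property)
    if value and u.Unit(value, parse_strict='silent').physical_type == 'unknown':
        warnings.warn(f"Unknown value for {meta_property.upper()}.")
# Warns only about WAVEUNIT; 'arcsec' parses to a known angle unit.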
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
# Add the unit to the units dictionary if not already there.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
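# Illustrative sketch, not part of the original source: `add_column` returns a new
# TimeSeries rather than modifying the original, and picks the unit up from a
# Quantity when none is given, e.g.
#   >>> new_ts = ts.add_column('doubled', ts.quantity('intensity') * 2)  # doctest: +SKIP
#   >>> new_ts.columns                                                   # doctest: +SKIP
#   ['intensity', 'doubled']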
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
# Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
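# Illustrative sketch, not part of the original source: `truncate` accepts a
# TimeRange, a pair of pandas-parsable strings, or positional integers with an
# optional step, e.g.
#   >>> ts.truncate(TimeRange('2012/06/01', '2012/06/02'))       # doctest: +SKIP
#   >>> ts.truncate('2012/06/01 00:00', '2012/06/01 12:00')      # doctest: +SKIP
#   >>> ts.truncate(0, 100, 2)   # every second row of the first 100  # doctest: +SKIP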
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
# Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
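# Illustrative sketch, not part of the original source: `extract` always hands back
# a GenericTimeSeries restricted to the single chosen column, e.g.
#   >>> ts.extract('intensity').columns   # doctest: +SKIP
#   ['intensity']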
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool` Optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
# Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
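# Illustrative sketch, not part of the original source: concatenating two series of
# the same class preserves that class, otherwise a GenericTimeSeries is returned, e.g.
#   >>> combined = ts_a.concatenate(ts_b)   # doctest: +SKIP
#   >>> combined.time_range                 # spans both inputs  # doctest: +SKIP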
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
To be added into all the peek methods in all source sub-classes.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
# Re-arrange so it's in the same order as the columns and removed unused.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
# Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
Return an Astropy Table of the given TimeSeries object.
Returns
-------
newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
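# Illustrative sketch, not part of the original source: the exported table carries
# the time index as a 'date' column and attaches the known units, e.g.
#   >>> tbl = ts.to_table()    # doctest: +SKIP
#   >>> tbl.colnames           # doctest: +SKIP
#   ['date', 'intensity']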
def to_dataframe(self, **kwargs):
"""
Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
return self.data.values[columns]
else:
return self.data.values
def __eq__(self, other):
"""
Check two TimeSeries objects are the same, they have matching type, data,
metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
Check two TimeSeries objects are not the same, they don't have matching
type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
_validate_units
|
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
|
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit from.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
# Add the unit to the units dictionary if not already there.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
# Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
# Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool` Optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
# Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
To be added into all the peek methods in all source sub-classes.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
# MASKED: _validate_units function (lines 447-470)
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
# Re-arrange so it's in the same order as the columns and removed unused.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
# Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
Return an Astropy Table of the given TimeSeries object.
Returns
-------
newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
return self.data.values[columns]
else:
return self.data.values
def __eq__(self, other):
"""
Check two TimeSeries objects are the same, they have matching type, data,
metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
Check two TimeSeries objects are not the same, they don't have matching
type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
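A minimal standalone sketch of the check performed by the implementation above,
with sunpy's SunpyUserWarning swapped for the built-in UserWarning so the snippet
only needs astropy:
import warnings
import astropy.units as u
def validate_units(units):
    """Return True only if every value in the mapping is an astropy unit."""
    result = True
    for key, value in units.items():
        if not isinstance(value, u.UnitBase):
            # Any non-unit value makes the whole mapping invalid.
            result = False
            warnings.warn(f"Invalid unit given for {key}.", UserWarning)
    return result
print(validate_units({'intensity': u.W / u.m ** 2}))  # True
print(validate_units({'intensity': 'watts'}))         # False, with a warning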
| 447 | 470 |
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit from.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
# Add the unit to the units dictionary if not already there.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
# Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
# Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool` Optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
# Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
To be added into all the peek methods in all source sub-classes.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
# Re-arrange so it's in the same order as the columns and removed unused.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
# Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
Return an Astropy Table of the given TimeSeries object.
Returns
-------
newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
return self.data.values[columns]
else:
return self.data.values
def __eq__(self, other):
"""
Check two TimeSeries objects are the same, they have matching type, data,
metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
Check two TimeSeries objects are not the same, they don't have matching
type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
_sanitize_units
|
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
|
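A minimal standalone sketch of the behaviour described in the docstring above
(drop unknown keys, fill missing columns with a dimensionless unit, follow the
column order), using only astropy and the standard library:
from collections import OrderedDict
import astropy.units as u
def sanitize_units(columns, units):
    """Rebuild the units mapping so it matches the data columns exactly."""
    fixed = OrderedDict()
    for column in columns:
        # Missing columns default to dimensionless; extra keys are simply dropped.
        fixed[column] = units.get(column, u.dimensionless_unscaled)
    return fixed
print(sanitize_units(['a', 'b'], {'b': u.K, 'zz': u.s}))
# 'a' -> dimensionless, 'b' -> K; the unused 'zz' entry is discarded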
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit from.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
# Add the unit to the units dictionary if not already there.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
# Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
# Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool` Optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
# Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
        To be added into all the peek methods in all source subclasses.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
# MASKED: _sanitize_units function (lines 472-495)
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
        Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
        # Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
        Return an Astropy Table of the given TimeSeries object.
Returns
-------
        newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
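    # Illustrative usage sketch (editor's addition): the returned Table gains a leading
    # 'date' column built from the index, and units are attached where self.units defines them:
    #     tbl = ts.to_table()
    #     tbl.colnames   # e.g. ['date', 'intensity'] for the class docstring example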
def to_dataframe(self, **kwargs):
"""
        Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
        Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
            # Select the named columns first so that string column labels index correctly.
            return self.data[columns].values
else:
return self.data.values
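    # Illustrative usage sketch (editor's addition): with no arguments every data column is
    # returned as a plain ndarray; a list of column names restricts the selection:
    #     arr_all = ts.to_array()
    #     arr_one = ts.to_array(columns=['intensity'])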
def __eq__(self, other):
"""
        Check whether two TimeSeries objects are the same, i.e. that they have matching
        type, data, metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
        Check whether two TimeSeries objects are not the same, i.e. that they do not have
        matching type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
        # Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
| 472 | 495 |
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
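    # Registration sketch (editor's addition, hypothetical subclass): defining a classmethod
    # named `is_datasource_for` on a subclass is enough for the hook above to record it in
    # `_registry`, which the TimeSeries factory later consults:
    #     class ExampleTimeSeries(GenericTimeSeries):
    #         @classmethod
    #         def is_datasource_for(cls, **kwargs):
    #             return kwargs.get('source', '').lower() == 'example'
    #     ExampleTimeSeries in GenericTimeSeries._registry   # -> True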
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
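    # Illustrative usage sketch (editor's addition): for the class docstring example, and
    # assuming default units have been filled in, the column comes back as an astropy Quantity:
    #     q = ts.quantity('intensity')
    #     q.unit   # dimensionless unless a unit was supplied for 'intensity'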
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
        Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
        # Add the unit to the units dictionary if not already present.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
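    # Illustrative usage sketch (editor's addition): adding a derived column returns a new
    # TimeSeries; when a Quantity is given, its unit is recorded for the new column:
    #     import numpy as np
    #     flux = u.Quantity(np.ones(len(ts.data)), u.W / u.m**2)
    #     ts2 = ts.add_column('flux', flux)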
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
            recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
            recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
        # Build a similar TimeSeries object and sanitise its metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
        # Build a generic TimeSeries object and sanitise its metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool` Optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
        # Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
    def plot(self, axes=None, **plot_args):
        """Plot the time series.
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
            If provided, the time series will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
        To be added into all the peek methods in all source subclasses.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
        # Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
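    # Behaviour sketch (editor's addition): building a TimeSeries without units for a column
    # leaves this method to fill in u.dimensionless_unscaled (with a SunpyUserWarning) and to
    # reorder the dict to follow the data's column order:
    #     ts_nounits = TimeSeries(df)            # df as in the class docstring
    #     ts_nounits.units['intensity']          # -> dimensionless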
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
        Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
        # Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
        Return an Astropy Table of the given TimeSeries object.
Returns
-------
        newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
        Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
        Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
            # Select the named columns first so that string column labels index correctly.
            return self.data[columns].values
else:
return self.data.values
def __eq__(self, other):
"""
        Check whether two TimeSeries objects are the same, i.e. that they have matching
        type, data, metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
        Check whether two TimeSeries objects are not the same, i.e. that they do not have
        matching type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
_sanitize_metadata
|
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
|
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
        Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
        # Add the unit to the units dictionary if not already present.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
            recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
            recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
        # Build a similar TimeSeries object and sanitise its metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
        # Build a generic TimeSeries object and sanitise its metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool` Optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
        # Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
    def plot(self, axes=None, **plot_args):
        """Plot the time series.
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
            If provided, the time series will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
        To be added into all the peek methods in all source subclasses.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
        # Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
# MASKED: _sanitize_metadata function (lines 497-515)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
        Return an Astropy Table of the given TimeSeries object.
Returns
-------
        newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
        Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
        Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
            # Select the named columns first so that string column labels index correctly.
            return self.data[columns].values
else:
return self.data.values
def __eq__(self, other):
"""
        Check whether two TimeSeries objects are the same, i.e. that they have matching
        type, data, metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
        Check whether two TimeSeries objects are not the same, i.e. that they do not have
        matching type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
        Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
        # Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
| 497 | 515 |
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
        Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
        # Add the unit to the units dictionary if not already present.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
            recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
            recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
        # Build a similar TimeSeries object and sanitise its metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
        # Build a generic TimeSeries object and sanitise its metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool` Optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
        # Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
    def plot(self, axes=None, **plot_args):
        """Plot the time series.
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
            If provided, the time series will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
        To be added into all the peek methods in all source subclasses.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
        # Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
        Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
        # Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
        Return an Astropy Table of the given TimeSeries object.
Returns
-------
        newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
        Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
        Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
            # Select the named columns first so that string column labels index correctly.
            return self.data[columns].values
else:
return self.data.values
def __eq__(self, other):
"""
        Check whether two TimeSeries objects are the same, i.e. that they have matching
        type, data, metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
        Check whether two TimeSeries objects are not the same, i.e. that they do not have
        matching type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
__eq__
|
Check whether two TimeSeries objects are the same, i.e. that they have matching
type, data, metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
|
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
        Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
        # Add the unit to the units dictionary if not already there.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
            recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
            recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
        # Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
        # Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool` Optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
        # Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
        To be added into all the peek methods in all source sub-classes.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
        # Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
        Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
        # Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
        Return an Astropy Table of the given TimeSeries object.
Returns
-------
        newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
        Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
        Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
return self.data.values[columns]
else:
return self.data.values
# MASKED: __eq__ function (lines 576-598)
def __ne__(self, other):
"""
Check two TimeSeries objects are not the same, they don't have matching
type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
|
def __eq__(self, other):
"""
Check two TimeSeries objects are the same, they have matching type, data,
metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
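Editorial aside: a small, hedged usage sketch of the equality semantics implemented above (type, data, metadata and units must all match). It assumes a sunpy release matching the GenericTimeSeries in this row; the column name "flux" and the values are invented.

import numpy as np
import pandas as pd
import astropy.units as u
from sunpy.timeseries import GenericTimeSeries  # assumption: sunpy version matching the class above

times = pd.date_range("2020-01-01", periods=3, freq="min")
ts1 = GenericTimeSeries(pd.DataFrame({"flux": [1.0, 2.0, 3.0]}, index=times),
                        units={"flux": u.ct})

# Rebuilding from the same data, metadata and units compares equal ...
ts2 = GenericTimeSeries(ts1.data.copy(), ts1.meta, ts1.units)
print(ts1 == ts2)   # True

# ... while overwriting a column's values breaks data.equals(), so __ne__ fires.
ts3 = ts1.add_column("flux", np.array([9.0, 9.0, 9.0]))
print(ts1 != ts3)   # True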
| 576 | 598 |
"""
TimeSeries is a generic time series class from which all other TimeSeries
classes inherit from.
"""
import copy
import warnings
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import astropy
import astropy.units as u
from astropy.table import Table, Column
from sunpy import config
from sunpy.time import TimeRange
from sunpy.timeseries import TimeSeriesMetaData
from sunpy.util.metadata import MetaDict
from sunpy.util.exceptions import SunpyUserWarning
# define and register a new unit, needed for RHESSI
det = u.def_unit('detector')
u.add_enabled_units([det])
TIME_FORMAT = config.get("general", "time_format")
class GenericTimeSeries:
"""
A generic time series object.
Parameters
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`, optional
The metadata giving details about the time series data/instrument.
units : dict, optional
A mapping from column names in *data* to the physical units of
that column.
Attributes
----------
data : `~pandas.DataFrame`
A pandas DataFrame representing one or more fields as a function
of time.
meta : `~sunpy.timeseries.metadata.TimeSeriesMetaData`
The metadata giving details about the time series data/instrument.
units : dict
A mapping from column names in *data* to the physical units of
that column.
Examples
--------
>>> from sunpy.timeseries import TimeSeries
>>> from sunpy.time import parse_time
>>> import datetime
>>> from astropy.time import TimeDelta
>>> import numpy as np
>>> import pandas as pd
>>> base = parse_time(datetime.datetime.today())
>>> times = base - TimeDelta(np.arange(24 * 60)*u.minute)
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / (24 * 60)))
>>> df = pd.DataFrame(intensity, index=times, columns=['intensity'])
>>> ts = TimeSeries(df)
>>> ts.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <https://pandas.pydata.org/pandas-docs/stable/>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = None
_registry = dict()
def __init_subclass__(cls, **kwargs):
"""
An __init_subclass__ hook initializes all of the subclasses of a given class.
So for each subclass, it will call this block of code on import.
This replicates some metaclass magic without the need to be aware of metaclasses.
Here we use this to register each subclass in a dict that has the `is_datasource_for`
attribute. This is then passed into the TimeSeries Factory so we can register them.
"""
super().__init_subclass__(**kwargs)
if hasattr(cls, 'is_datasource_for'):
cls._registry[cls] = cls.is_datasource_for
def __init__(self, data, meta=None, units=None, **kwargs):
self.data = data
tr = self.time_range
# Check metadata input
if meta is None:
# No meta given, so default
self.meta = TimeSeriesMetaData(MetaDict(), tr, list(self.data.columns.values))
elif isinstance(meta, (dict, OrderedDict, MetaDict)):
# Given the values for metadata (dict) and infer timerange and colnames from the data
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
elif isinstance(meta, tuple):
# Given the values all in a tuple
self.meta = TimeSeriesMetaData(meta, tr, list(self.data.columns.values))
else:
# Should have a list of 3-tuples giving a complex metadata list.
self.meta = meta
if units is None:
self.units = {}
else:
self.units = units
# Validate input data
# self._validate_meta()
# self._validate_units()
# #### Attribute definitions #### #
@property
def source(self):
"""
A string/object used to specify the source class of the TimeSeries.
"""
return self._source
@property
def columns(self):
"""A list of all the names of the columns in the data."""
return list(self.data.columns.values)
@property
def index(self):
"""The time index of the data."""
return self.data.index
@property
def time_range(self):
"""
The start and end times of the TimeSeries as a `~sunpy.time.TimeRange`
object
"""
if len(self.data)>0:
return TimeRange(self.data.index.min(), self.data.index.max())
else:
return None
# #### Data Access, Selection and Organisation Methods #### #
def quantity(self, colname, **kwargs):
"""
Return a `~astropy.units.quantity.Quantity` for the given column.
Parameters
----------
colname : `str`
The heading of the column you want output.
Returns
-------
quantity : `~astropy.units.quantity.Quantity`
"""
values = self.data[colname].values
unit = self.units[colname]
return u.Quantity(values, unit)
def add_column(self, colname, quantity, unit=False, overwrite=True, **kwargs):
"""
        Return a new TimeSeries with the given column added or updated.
Parameters
----------
colname : `str`
The heading of the column you want output.
quantity : `~astropy.units.quantity.Quantity` or `~numpy.ndarray`
The values to be placed within the column.
If updating values only then a numpy array is permitted.
overwrite : `bool`, optional, default:True
Set to true to allow the method to overwrite a column already present
in the TimeSeries.
Returns
-------
newts : TimeSeries
"""
# Get the expected units from the quantity if required
if not unit and isinstance(quantity, astropy.units.quantity.Quantity):
unit = quantity.unit
elif not unit:
unit = u.dimensionless_unscaled
# Make a copy of all the TimeSeries components.
data = copy.copy(self.data)
meta = TimeSeriesMetaData(copy.copy(self.meta.metadata))
units = copy.copy(self.units)
        # Add the unit to the units dictionary if not already there.
if not (colname in self.data.columns):
units[colname] = unit
# Convert the given quantity into values for given units if necessary.
values = quantity
if isinstance(values, astropy.units.quantity.Quantity) and overwrite:
values = values.to(units[colname]).value
# Update or add the data.
if not (colname in self.data.columns) or overwrite:
data[colname] = values
# Return a new TimeSeries with the given updated/added column.
return self.__class__(data, meta, units)
def sort_index(self, **kwargs):
"""Returns a sorted version of the TimeSeries object.
Generally this shouldn't be necessary as most TimeSeries operations sort
the data anyway to ensure consistent behaviour when truncating.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series in ascending chronological order.
"""
return GenericTimeSeries(self.data.sort_index(**kwargs),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
def truncate(self, a, b=None, int=None):
"""Returns a truncated version of the TimeSeries object.
Parameters
----------
a : `sunpy.time.TimeRange`, `str` or `int`
Either a time range to truncate to, or a start time in some format
            recognised by pandas, or an index integer.
b : `str` or `int`
If specified, the end time of the time range in some format
            recognised by pandas, or an index integer.
int : `int`
If specified, the integer indicating the slicing intervals.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected times.
"""
# Evaluate inputs
# If given strings, then use to create a sunpy.time.timerange.TimeRange
# for the SunPy text date parser.
if isinstance(a, str) and isinstance(b, str):
a = TimeRange(a, b)
if isinstance(a, TimeRange):
# If we have a TimeRange, extract the values
start = a.start.datetime
end = a.end.datetime
else:
# Otherwise we already have the values
start = a
end = b
# If an interval integer was given then use in truncation.
truncated_data = self.data.sort_index()[start:end:int]
# Truncate the metadata
# Check there is data still
truncated_meta = TimeSeriesMetaData([])
if len(truncated_data) > 0:
tr = TimeRange(truncated_data.index.min(), truncated_data.index.max())
truncated_meta = TimeSeriesMetaData(copy.deepcopy(self.meta.metadata))
truncated_meta._truncate(tr)
        # Build similar TimeSeries object and sanitise metadata and units.
object = self.__class__(truncated_data.sort_index(), truncated_meta, copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def extract(self, column_name):
"""Returns a new time series with the chosen column.
Parameters
----------
column_name : `str`
A valid column name.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series with only the selected column.
"""
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return GenericTimeSeries(self.data[column_name], TimeSeriesMetaData(self.meta.metadata.copy()))
"""
# Extract column and remove empty rows
data = self.data[[column_name]].dropna()
        # Build generic TimeSeries object and sanitise metadata and units.
object = GenericTimeSeries(data.sort_index(),
TimeSeriesMetaData(copy.copy(self.meta.metadata)),
copy.copy(self.units))
object._sanitize_metadata()
object._sanitize_units()
return object
def concatenate(self, otherts, **kwargs):
"""Concatenate with another TimeSeries. This function will check and
remove any duplicate times. It will keep the column values from the
original time series to which the new time series is being added.
Parameters
----------
otherts : `~sunpy.timeseries.TimeSeries`
Another time series.
same_source : `bool` Optional
Set to true to check if the sources of the time series match.
Returns
-------
newts : `~sunpy.timeseries.TimeSeries`
A new time series.
Debate: decide if we want to be able to concatenate multiple time series
at once.
"""
# check to see if nothing needs to be done
if self == otherts:
return self
# Check the sources match if specified.
same_source = kwargs.get('same_source', False)
if same_source and not (isinstance(otherts, self.__class__)):
raise TypeError("TimeSeries classes must match if specified.")
# Concatenate the metadata and data
meta = self.meta.concatenate(otherts.meta)
data = pd.concat([self.data.copy(), otherts.data], **kwargs)
# Add all the new units to the dictionary.
units = OrderedDict()
units.update(self.units)
units.update(otherts.units)
# If sources match then build similar TimeSeries.
if self.__class__ == otherts.__class__:
object = self.__class__(data.sort_index(), meta, units)
else:
# Build generic time series if the sources don't match.
object = GenericTimeSeries(data.sort_index(), meta, units)
        # Sanitise metadata and units
object._sanitize_metadata()
object._sanitize_units()
return object
# #### Plotting Methods #### #
def plot(self, axes=None, **plot_args):
"""Plot a plot of the time series
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the time series in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used when plotting.
"""
# Check we have a timeseries valid for plotting
self._validate_data_for_ploting()
# Now make the plot
figure = plt.figure()
self.plot(**kwargs)
figure.show()
def _validate_data_for_ploting(self):
"""Raises an exception if the timeseries is invalid for plotting.
        To be added into all the peek methods in all source sub-classes.
Currently only checks if we have an empty timeseries, where:
len(self.data) == 0
"""
# Check we have a valid TS
if len(self.data) == 0:
raise ValueError("The timeseries can't be plotted as it has no data present. "
"(len(self.data) == 0)")
# #### Miscellaneous #### #
def _validate_meta(self):
"""
Validates the meta-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
for meta_property in ('cunit1', 'cunit2', 'waveunit'):
if (self.meta.get(meta_property) and
u.Unit(self.meta.get(meta_property),
parse_strict='silent').physical_type == 'unknown'):
warnings.warn(f"Unknown value for {meta_property.upper()}.", SunpyUserWarning)
def _validate_units(self, units, **kwargs):
"""
Validates the astropy unit-information associated with a TimeSeries.
This method includes very basic validation checks which apply to
all of the kinds of files that SunPy can read. Datasource-specific
validation should be handled in the relevant file in the
sunpy.timeseries.sources package.
Allows for default unit assignment for:
COL_UNITS
"""
warnings.simplefilter('always', Warning)
result = True
for key in units:
if not isinstance(units[key], astropy.units.UnitBase):
# If this is not a unit then this can't be a valid units dict.
result = False
warnings.warn(f"Invalid unit given for {key}.", SunpyUserWarning)
return result
def _sanitize_units(self, **kwargs):
"""
Sanitises the collections.OrderedDict used to store the units.
Primarily this method will:
Remove entries that don't match up to a column,
Add unitless entries for columns with no units defined.
Re-arrange the order of the dictionary to match the columns.
"""
warnings.simplefilter('always', Warning)
# Populate unspecified units:
for column in set(self.data.columns.tolist()) - set(self.units.keys()):
# For all columns not present in the units dictionary.
self.units[column] = u.dimensionless_unscaled
warnings.warn(f"Unknown units for {column}.", SunpyUserWarning)
        # Re-arrange so it's in the same order as the columns and remove unused entries.
units = OrderedDict()
for column in self.data.columns.tolist():
units.update({column:self.units[column]})
# Now use the amended units Ordered Dictionary
self.units = units
def _sanitize_metadata(self, **kwargs):
"""
Sanitises the TimeSeriesMetaData object used to store the metadata.
Primarily this method will:
        Remove entries outside of the data's TimeRange or truncate TimeRanges
if the metadata overflows past the data,
Remove column references in the metadata that don't match to a column
in the data.
Remove metadata entries that have no columns matching the data.
"""
warnings.simplefilter('always', Warning)
# Truncate the metadata
self.meta._truncate(self.time_range)
        # Remove non-existent columns
redundant_cols = list(set(self.meta.columns) - set(self.columns))
self.meta._remove_columns(redundant_cols)
# #### Export/Output Methods #### #
def to_table(self, **kwargs):
"""
        Return an Astropy Table of the given TimeSeries object.
Returns
-------
        newtable : `~astropy.table.Table`
A new astropy table containing the data from the time series.
The table will include units where relevant.
"""
# ToDo: Table.from_pandas(df) doesn't include the index column. Add request?
# Get data columns
table = Table.from_pandas(self.data)
# Get index column and add to table.
index_col = Column(self.data.index.values, name='date')
table.add_column(index_col, index=0)
# Add in units.
for key in self.units:
table[key].unit = self.units[key]
# Output the table
return table
def to_dataframe(self, **kwargs):
"""
        Return a Pandas DataFrame of the given TimeSeries object.
Returns
-------
newdf : `~pandas.core.frame.DataFrame`
A Pandas Dataframe containing the data.
"""
return self.data
def to_array(self, columns=None):
"""
        Return a numpy array of the given TimeSeries object.
Parameters
----------
columns: `list`, optional, default:None
If None, return all columns minus the index, otherwise, returns
specified columns.
Returns
-------
values : `~numpy.ndarray`
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
"""
if columns:
return self.data.values[columns]
else:
return self.data.values
def __eq__(self, other):
"""
Check two TimeSeries objects are the same, they have matching type, data,
metadata and units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
match = True
if isinstance(other, type(self)):
if ((not self.data.equals(other.data)) or
(self.meta != other.meta) or
(self.units != other.units)):
match = False
else:
match = False
return match
def __ne__(self, other):
"""
Check two TimeSeries objects are not the same, they don't have matching
type, data, metadata and/or units entries.
Parameters
----------
other : `~sunpy.timeseries.GenericTimeSeries`
The second TimeSeries object to compare with.
Returns
-------
result : `bool`
"""
return not self == other
@classmethod
def _parse_file(cls, filepath):
"""Parses a file - to be implemented in any subclass that may use files"""
return NotImplemented
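Editorial aside: the truncate() method above turns two date strings into a TimeRange and slices the sorted index; a hedged sketch of that call path, again assuming a sunpy release matching this file, with made-up sample values.

import numpy as np
import pandas as pd
import astropy.units as u
from sunpy.timeseries import GenericTimeSeries  # assumption: sunpy version matching the class above

times = pd.date_range("2020-01-01 00:00", periods=60, freq="min")
ts = GenericTimeSeries(pd.DataFrame({"flux": np.arange(60.0)}, index=times),
                       units={"flux": u.ct})

# Two strings are combined into a TimeRange, then used to slice the sorted data.
trimmed = ts.truncate("2020-01-01 00:10:00", "2020-01-01 00:20:00")
print(trimmed.data.index.min(), trimmed.data.index.max())
print(len(trimmed.data))   # the ten-minute window, endpoints included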
|
find_T_bit_to_right_of
|
Returns position of 1 (True) bit immediately to the right of
position bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
|
class BitVector:
"""
This class uses an int called dec_rep as a vector of self.len many bits,
where self.len <= self.max_len. The class wraps some common bitwise
operations, and some less common ones too (like Gray coding that is
needed by Qubiter). In some cases, the bitwise manipulation might be
more succinct than the corresponding function in this wrapper, but the
wrapper function's name spells out in words what is wanted.
Attributes
----------
dec_rep : int
decimal representation, the int whose binary representation carries
a bit vector of length self.len
len : int
the length of the bit vector
max_len : int
maximum self.len allowed
"""
def __init__(self, length, dec_rep):
"""
Constructor
Parameters
----------
length : int
dec_rep : int
Returns
-------
"""
self.len = length
self.dec_rep = dec_rep
self.max_len = 16
assert length <= self.max_len, "bit vector is too long"
assert length > 0, "bit vector len must be >=1"
@staticmethod
def copy(bvec):
"""
Copy constructor, returns a new BitVector which is a copy of the
BitVector bvec.
Parameters
----------
bvec : BitVector
Returns
-------
BitVector
"""
return BitVector(bvec.len, bvec.dec_rep)
def bit_is_T(self, bpos):
"""
Returns True iff bit at position bpos is 1 (True)
Parameters
----------
bpos : int
bit position
Returns
-------
bool
"""
assert bpos < self.len, "bit position is too large"
mask = (1 << bpos)
return (self.dec_rep & mask) == mask
def set_bit_T(self, bpos):
"""
Sets to 1 (True) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep |= (1 << bpos)
def set_bit_F(self, bpos):
"""
Sets to 0 (False) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep &= ~(1 << bpos)
def set_all_bits_T(self):
"""
Sets to 1 (True) the bits of self at position bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
        # example: len = 3, dec_rep becomes 7 = b111
        self.dec_rep = (1 << self.len) - 1
def set_all_bits_F(self):
"""
Sets to 0 (False) the bits of self at positions bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
self.dec_rep = 0
def get_num_T_bits(self):
"""
Returns the number of 1 (True) bits at positions bpos
from 0 to len-1 inclusive.
Returns
-------
int
"""
count = 0
for bpos in range(self.len):
if self.bit_is_T(bpos):
count += 1
return count
# MASKED: find_T_bit_to_right_of function (lines 151-182)
def find_T_bit_to_left_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the left of position
bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos >= self.len-1:
return -1
left_T_bit = bpos
mask = (1 << left_T_bit)
found_it = False
while True:
left_T_bit += 1
mask <<= 1
found_it = ((self.dec_rep & mask) == mask)
if left_T_bit == self.len-1 or found_it:
break
if found_it:
return left_T_bit
else:
return -1
def find_leftmost_T_bit(self):
"""
        Returns the position of the leftmost 1 (True) bit. Returns -1 if
        there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(self.len-1):
return self.len-1
else:
return self.find_T_bit_to_right_of(self.len - 1)
def find_rightmost_T_bit(self):
"""
        Returns the position of the rightmost 1 (True) bit. Returns -1 if
        there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(0):
return 0
else:
return self.find_T_bit_to_left_of(0)
def get_bit_string(self):
"""
Returns self represented as string of length self.len of ones and
zeros. If bit_str is the output, [int(x) for x in bit_str] will turn
result to list of ints.
Returns
-------
str
"""
bit_str = ''
for beta in range(self.len-1, -1, -1):
if self.bit_is_T(beta):
bit_str += '1'
else:
bit_str += '0'
return bit_str
def __str__(self):
"""
Readable representation of self
Returns
-------
str
"""
return self.get_bit_string() + '=' + str(self.dec_rep)
@staticmethod
def new_with_T_on_diff(bvec1, bvec2):
"""
        Given two BitVectors bvec1 and bvec2, this returns a BitVector which
is a bitwise xor (mod 2 sum) of the bits of bvec1 and bvec2.
Parameters
----------
bvec1 : BitVector
bvec2 : BitVector
Returns
-------
BitVector
"""
assert bvec1.len == bvec2.len
return BitVector(bvec1.len, bvec1.dec_rep ^ bvec2.dec_rep)
@staticmethod
def get_lazy_from_normal(bit_len, normal):
"""
Throughout Qubiter, we will often refer to "Gray Code" as "lazy
ordering". In lazy ordering with bit_len many bits, one gives a
sequence of bit vectors of length bit_len, so that two adjacent
items of the sequence differ by just one bit. For example 000=0,
100=4, 110=6, 010=2, 011=3, 111=7, 101=5, 001=1. Each element of
this sequence represented as an int will be called lazy, and each
int in the sequence 0, 1, 2, 3, 4, 5, 6, 7 will be called normal.
Normal ordering is usually called dictionary ordering. Normal and
lazy sequences both start at 0.
Suppose bit_len = 3. The lazy sequence 000, 100, 110, 010, 011, 111,
101, 001 is easily obtained from the "standard" lazy sequence 000,
001, 011, 010, 110, 111, 101, 100 by "reflecting" each sequence
term. We will use the second sequence because it is more common in
the literature.
References
----------
        1. Martin Gardner, "Knotted Doughnuts and Other
        Mathematical Entertainments", chapt. 2, "The Binary Gray Code"
        2. "Numerical Recipes in C"
3. Many books on Discrete Mathematics for CompSci types
4. On the web, in Eric's Treasure Trove/Math/Gray Codes
Parameters
----------
bit_len : int
normal : int
Function returns the lazy int that corresponds to this normal int.
Returns
-------
int
"""
lazy_bvec = BitVector(bit_len, normal)
lazy = lazy_bvec.dec_rep
if bit_len > 1:
for m in range(bit_len-2, -1, -1):
# Look at bpos = m+1, if it's ON, then flip bpos=m.
# Remember that ^ is same as a mod2 sum.
lazy ^= (((normal >> m+1) & 1) << m)
return lazy
@staticmethod
def lazy_advance(old_normal, old_lazy):
"""
This method takes int "old_lazy" (which corresponds to bit vector
"old_normal"), and changes it to the next lazy int, "new_lazy" (
which corresponds to "new_normal").
example:
lazy sequence: 000, 001, 011, 010, 110, 111, 101, 100
old_lazy = 011
old_normal = 2 = 010
new_normal = 3 = 011
mask = (new_normal & ~old_normal) = 011 & 101 = 001
new_lazy = new_normal ^ mask = 011 ^ 001 = 010
Parameters
----------
old_normal : int
old_lazy : int
Returns
-------
int, int
"""
new_normal = old_normal + 1
new_lazy = old_lazy ^ (new_normal & ~(new_normal-1))
return new_normal, new_lazy
if __name__ == "__main__":
def main():
for length in [3, 4]:
print('\nlength=', length)
print('normal, lazy, lazy in binary:')
max_normal = (1 << length) - 1
normal = 0
lazy = 0
while normal <= max_normal:
lazy_bvec = BitVector(length, lazy)
print(normal, lazy, BitVector.get_bit_string(lazy_bvec))
normal, lazy = BitVector.lazy_advance(normal, lazy)
main()
|
def find_T_bit_to_right_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the right of
position bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos <= 0:
return -1
right_T_bit = bpos
mask = (1 << right_T_bit)
found_it = False
while True:
right_T_bit -= 1
mask >>= 1
found_it = ((self.dec_rep & mask) == mask)
if right_T_bit == 0 or found_it:
break
if found_it:
return right_T_bit
else:
return -1
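Editorial aside: a quick check of the rightward search implemented above. It assumes the BitVector class from this row is in scope (e.g. the file above saved as a module and imported); the bit pattern is made up.

# assumes the BitVector class defined above is available in the current namespace
bv = BitVector(4, 0b1010)            # bits 3 and 1 are ON -> '1010'
print(bv)                            # 1010=10
print(bv.find_T_bit_to_right_of(3))  # 1: first ON bit strictly to the right of bpos 3
print(bv.find_T_bit_to_right_of(1))  # -1: nothing ON to the right of bpos 1
print(bv.find_T_bit_to_right_of(0))  # -1: no positions exist to the right of bpos 0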
| 151 | 182 |
class BitVector:
"""
This class uses an int called dec_rep as a vector of self.len many bits,
where self.len <= self.max_len. The class wraps some common bitwise
operations, and some less common ones too (like Gray coding that is
needed by Qubiter). In some cases, the bitwise manipulation might be
more succinct than the corresponding function in this wrapper, but the
wrapper function's name spells out in words what is wanted.
Attributes
----------
dec_rep : int
decimal representation, the int whose binary representation carries
a bit vector of length self.len
len : int
the length of the bit vector
max_len : int
maximum self.len allowed
"""
def __init__(self, length, dec_rep):
"""
Constructor
Parameters
----------
length : int
dec_rep : int
Returns
-------
"""
self.len = length
self.dec_rep = dec_rep
self.max_len = 16
assert length <= self.max_len, "bit vector is too long"
assert length > 0, "bit vector len must be >=1"
@staticmethod
def copy(bvec):
"""
Copy constructor, returns a new BitVector which is a copy of the
BitVector bvec.
Parameters
----------
bvec : BitVector
Returns
-------
BitVector
"""
return BitVector(bvec.len, bvec.dec_rep)
def bit_is_T(self, bpos):
"""
Returns True iff bit at position bpos is 1 (True)
Parameters
----------
bpos : int
bit position
Returns
-------
bool
"""
assert bpos < self.len, "bit position is too large"
mask = (1 << bpos)
return (self.dec_rep & mask) == mask
def set_bit_T(self, bpos):
"""
Sets to 1 (True) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep |= (1 << bpos)
def set_bit_F(self, bpos):
"""
Sets to 0 (False) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep &= ~(1 << bpos)
def set_all_bits_T(self):
"""
Sets to 1 (True) the bits of self at position bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
        # example: len = 3, dec_rep becomes 7 = b111
        self.dec_rep = (1 << self.len) - 1
def set_all_bits_F(self):
"""
Sets to 0 (False) the bits of self at positions bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
self.dec_rep = 0
def get_num_T_bits(self):
"""
Returns the number of 1 (True) bits at positions bpos
from 0 to len-1 inclusive.
Returns
-------
int
"""
count = 0
for bpos in range(self.len):
if self.bit_is_T(bpos):
count += 1
return count
def find_T_bit_to_right_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the right of
position bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos <= 0:
return -1
right_T_bit = bpos
mask = (1 << right_T_bit)
found_it = False
while True:
right_T_bit -= 1
mask >>= 1
found_it = ((self.dec_rep & mask) == mask)
if right_T_bit == 0 or found_it:
break
if found_it:
return right_T_bit
else:
return -1
def find_T_bit_to_left_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the left of position
bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos >= self.len-1:
return -1
left_T_bit = bpos
mask = (1 << left_T_bit)
found_it = False
while True:
left_T_bit += 1
mask <<= 1
found_it = ((self.dec_rep & mask) == mask)
if left_T_bit == self.len-1 or found_it:
break
if found_it:
return left_T_bit
else:
return -1
def find_leftmost_T_bit(self):
"""
        Returns the position of the leftmost 1 (True) bit. Returns -1 if
        there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(self.len-1):
return self.len-1
else:
return self.find_T_bit_to_right_of(self.len - 1)
def find_rightmost_T_bit(self):
"""
        Returns the position of the rightmost 1 (True) bit. Returns -1 if
        there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(0):
return 0
else:
return self.find_T_bit_to_left_of(0)
def get_bit_string(self):
"""
Returns self represented as string of length self.len of ones and
zeros. If bit_str is the output, [int(x) for x in bit_str] will turn
result to list of ints.
Returns
-------
str
"""
bit_str = ''
for beta in range(self.len-1, -1, -1):
if self.bit_is_T(beta):
bit_str += '1'
else:
bit_str += '0'
return bit_str
def __str__(self):
"""
Readable representation of self
Returns
-------
str
"""
return self.get_bit_string() + '=' + str(self.dec_rep)
@staticmethod
def new_with_T_on_diff(bvec1, bvec2):
"""
        Given two BitVectors bvec1 and bvec2, this returns a BitVector which
is a bitwise xor (mod 2 sum) of the bits of bvec1 and bvec2.
Parameters
----------
bvec1 : BitVector
bvec2 : BitVector
Returns
-------
BitVector
"""
assert bvec1.len == bvec2.len
return BitVector(bvec1.len, bvec1.dec_rep ^ bvec2.dec_rep)
@staticmethod
def get_lazy_from_normal(bit_len, normal):
"""
Throughout Qubiter, we will often refer to "Gray Code" as "lazy
ordering". In lazy ordering with bit_len many bits, one gives a
sequence of bit vectors of length bit_len, so that two adjacent
items of the sequence differ by just one bit. For example 000=0,
100=4, 110=6, 010=2, 011=3, 111=7, 101=5, 001=1. Each element of
this sequence represented as an int will be called lazy, and each
int in the sequence 0, 1, 2, 3, 4, 5, 6, 7 will be called normal.
Normal ordering is usually called dictionary ordering. Normal and
lazy sequences both start at 0.
Suppose bit_len = 3. The lazy sequence 000, 100, 110, 010, 011, 111,
101, 001 is easily obtained from the "standard" lazy sequence 000,
001, 011, 010, 110, 111, 101, 100 by "reflecting" each sequence
term. We will use the second sequence because it is more common in
the literature.
References
----------
        1. Martin Gardner, "Knotted Doughnuts and Other
        Mathematical Entertainments", chapt. 2, "The Binary Gray Code"
        2. "Numerical Recipes in C"
3. Many books on Discrete Mathematics for CompSci types
4. On the web, in Eric's Treasure Trove/Math/Gray Codes
Parameters
----------
bit_len : int
normal : int
Function returns the lazy int that corresponds to this normal int.
Returns
-------
int
"""
lazy_bvec = BitVector(bit_len, normal)
lazy = lazy_bvec.dec_rep
if bit_len > 1:
for m in range(bit_len-2, -1, -1):
# Look at bpos = m+1, if it's ON, then flip bpos=m.
# Remember that ^ is same as a mod2 sum.
lazy ^= (((normal >> m+1) & 1) << m)
return lazy
@staticmethod
def lazy_advance(old_normal, old_lazy):
"""
This method takes int "old_lazy" (which corresponds to bit vector
"old_normal"), and changes it to the next lazy int, "new_lazy" (
which corresponds to "new_normal").
example:
lazy sequence: 000, 001, 011, 010, 110, 111, 101, 100
old_lazy = 011
old_normal = 2 = 010
new_normal = 3 = 011
mask = (new_normal & ~old_normal) = 011 & 101 = 001
new_lazy = new_normal ^ mask = 011 ^ 001 = 010
Parameters
----------
old_normal : int
old_lazy : int
Returns
-------
int, int
"""
new_normal = old_normal + 1
new_lazy = old_lazy ^ (new_normal & ~(new_normal-1))
return new_normal, new_lazy
if __name__ == "__main__":
def main():
for length in [3, 4]:
print('\nlength=', length)
print('normal, lazy, lazy in binary:')
max_normal = (1 << length) - 1
normal = 0
lazy = 0
while normal <= max_normal:
lazy_bvec = BitVector(length, lazy)
print(normal, lazy, BitVector.get_bit_string(lazy_bvec))
normal, lazy = BitVector.lazy_advance(normal, lazy)
main()
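Editorial aside: the get_lazy_from_normal/lazy_advance docstrings above describe the "lazy" (Gray code) ordering; a short worked sketch, assuming the BitVector class above is importable or pasted into the same module.

# assumes the BitVector class defined above is available in the current namespace
length = 3
normal, lazy = 0, 0
sequence = []
while normal < (1 << length):
    sequence.append(BitVector(length, lazy).get_bit_string())
    normal, lazy = BitVector.lazy_advance(normal, lazy)
print(sequence)
# ['000', '001', '011', '010', '110', '111', '101', '100']

# get_lazy_from_normal() gives the same mapping one term at a time:
print(BitVector.get_lazy_from_normal(3, 4))   # 6, i.e. '110'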
|
find_T_bit_to_left_of
|
Returns position of 1 (True) bit immediately to the left of position
bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
|
class BitVector:
"""
This class uses an int called dec_rep as a vector of self.len many bits,
where self.len <= self.max_len. The class wraps some common bitwise
operations, and some less common ones too (like Gray coding that is
needed by Qubiter). In some cases, the bitwise manipulation might be
more succinct than the corresponding function in this wrapper, but the
wrapper function's name spells out in words what is wanted.
Attributes
----------
dec_rep : int
decimal representation, the int whose binary representation carries
a bit vector of length self.len
len : int
the length of the bit vector
max_len : int
maximum self.len allowed
"""
def __init__(self, length, dec_rep):
"""
Constructor
Parameters
----------
length : int
dec_rep : int
Returns
-------
"""
self.len = length
self.dec_rep = dec_rep
self.max_len = 16
assert length <= self.max_len, "bit vector is too long"
assert length > 0, "bit vector len must be >=1"
@staticmethod
def copy(bvec):
"""
Copy constructor, returns a new BitVector which is a copy of the
BitVector bvec.
Parameters
----------
bvec : BitVector
Returns
-------
BitVector
"""
return BitVector(bvec.len, bvec.dec_rep)
def bit_is_T(self, bpos):
"""
Returns True iff bit at position bpos is 1 (True)
Parameters
----------
bpos : int
bit position
Returns
-------
bool
"""
assert bpos < self.len, "bit position is too large"
mask = (1 << bpos)
return (self.dec_rep & mask) == mask
def set_bit_T(self, bpos):
"""
Sets to 1 (True) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep |= (1 << bpos)
def set_bit_F(self, bpos):
"""
Sets to 0 (False) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep &= ~(1 << bpos)
def set_all_bits_T(self):
"""
Sets to 1 (True) the bits of self at position bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
        # example: len = 3, dec_rep becomes 7 = b111
        self.dec_rep = (1 << self.len) - 1
def set_all_bits_F(self):
"""
Sets to 0 (False) the bits of self at positions bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
self.dec_rep = 0
def get_num_T_bits(self):
"""
Returns the number of 1 (True) bits at positions bpos
from 0 to len-1 inclusive.
Returns
-------
int
"""
count = 0
for bpos in range(self.len):
if self.bit_is_T(bpos):
count += 1
return count
def find_T_bit_to_right_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the right of
position bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos <= 0:
return -1
right_T_bit = bpos
mask = (1 << right_T_bit)
found_it = False
while True:
right_T_bit -= 1
mask >>= 1
found_it = ((self.dec_rep & mask) == mask)
if right_T_bit == 0 or found_it:
break
if found_it:
return right_T_bit
else:
return -1
# MASKED: find_T_bit_to_left_of function (lines 184-215)
def find_leftmost_T_bit(self):
"""
        Returns the position of the leftmost 1 (True) bit. Returns -1 if
        there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(self.len-1):
return self.len-1
else:
return self.find_T_bit_to_right_of(self.len - 1)
def find_rightmost_T_bit(self):
"""
        Returns the position of the rightmost 1 (True) bit. Returns -1 if
        there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(0):
return 0
else:
return self.find_T_bit_to_left_of(0)
def get_bit_string(self):
"""
Returns self represented as string of length self.len of ones and
zeros. If bit_str is the output, [int(x) for x in bit_str] will turn
result to list of ints.
Returns
-------
str
"""
bit_str = ''
for beta in range(self.len-1, -1, -1):
if self.bit_is_T(beta):
bit_str += '1'
else:
bit_str += '0'
return bit_str
def __str__(self):
"""
Readable representation of self
Returns
-------
str
"""
return self.get_bit_string() + '=' + str(self.dec_rep)
@staticmethod
def new_with_T_on_diff(bvec1, bvec2):
"""
        Given two BitVectors bvec1 and bvec2, this returns a BitVector which
is a bitwise xor (mod 2 sum) of the bits of bvec1 and bvec2.
Parameters
----------
bvec1 : BitVector
bvec2 : BitVector
Returns
-------
BitVector
"""
assert bvec1.len == bvec2.len
return BitVector(bvec1.len, bvec1.dec_rep ^ bvec2.dec_rep)
@staticmethod
def get_lazy_from_normal(bit_len, normal):
"""
Throughout Qubiter, we will often refer to "Gray Code" as "lazy
ordering". In lazy ordering with bit_len many bits, one gives a
sequence of bit vectors of length bit_len, so that two adjacent
items of the sequence differ by just one bit. For example 000=0,
100=4, 110=6, 010=2, 011=3, 111=7, 101=5, 001=1. Each element of
this sequence represented as an int will be called lazy, and each
int in the sequence 0, 1, 2, 3, 4, 5, 6, 7 will be called normal.
Normal ordering is usually called dictionary ordering. Normal and
lazy sequences both start at 0.
Suppose bit_len = 3. The lazy sequence 000, 100, 110, 010, 011, 111,
101, 001 is easily obtained from the "standard" lazy sequence 000,
001, 011, 010, 110, 111, 101, 100 by "reflecting" each sequence
term. We will use the second sequence because it is more common in
the literature.
References
----------
        1. Martin Gardner, "Knotted Doughnuts and Other
        Mathematical Entertainments", chapt. 2, "The Binary Gray Code"
        2. "Numerical Recipes in C"
3. Many books on Discrete Mathematics for CompSci types
4. On the web, in Eric's Treasure Trove/Math/Gray Codes
Parameters
----------
bit_len : int
normal : int
Function returns the lazy int that corresponds to this normal int.
Returns
-------
int
"""
lazy_bvec = BitVector(bit_len, normal)
lazy = lazy_bvec.dec_rep
if bit_len > 1:
for m in range(bit_len-2, -1, -1):
# Look at bpos = m+1, if it's ON, then flip bpos=m.
# Remember that ^ is same as a mod2 sum.
lazy ^= (((normal >> m+1) & 1) << m)
return lazy
@staticmethod
def lazy_advance(old_normal, old_lazy):
"""
This method takes int "old_lazy" (which corresponds to bit vector
"old_normal"), and changes it to the next lazy int, "new_lazy" (
which corresponds to "new_normal").
example:
lazy sequence: 000, 001, 011, 010, 110, 111, 101, 100
old_lazy = 011
old_normal = 2 = 010
new_normal = 3 = 011
mask = (new_normal & ~old_normal) = 011 & 101 = 001
new_lazy = new_normal ^ mask = 011 ^ 001 = 010
Parameters
----------
old_normal : int
old_lazy : int
Returns
-------
int, int
"""
new_normal = old_normal + 1
new_lazy = old_lazy ^ (new_normal & ~(new_normal-1))
return new_normal, new_lazy
if __name__ == "__main__":
def main():
for length in [3, 4]:
print('\nlength=', length)
print('normal, lazy, lazy in binary:')
max_normal = (1 << length) - 1
normal = 0
lazy = 0
while normal <= max_normal:
lazy_bvec = BitVector(length, lazy)
print(normal, lazy, BitVector.get_bit_string(lazy_bvec))
normal, lazy = BitVector.lazy_advance(normal, lazy)
main()
|
def find_T_bit_to_left_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the left of position
bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos >= self.len-1:
return -1
left_T_bit = bpos
mask = (1 << left_T_bit)
found_it = False
while True:
left_T_bit += 1
mask <<= 1
found_it = ((self.dec_rep & mask) == mask)
if left_T_bit == self.len-1 or found_it:
break
if found_it:
return left_T_bit
else:
return -1
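Editorial aside: the mirror of the rightward search, using the implementation above. It assumes the BitVector class from this row is in scope; the bit pattern is made up.

# assumes the BitVector class defined above is available in the current namespace
bv = BitVector(4, 0b0101)           # bits 2 and 0 are ON -> '0101'
print(bv.find_T_bit_to_left_of(0))  # 2: first ON bit strictly to the left of bpos 0
print(bv.find_T_bit_to_left_of(2))  # -1: bit 3 is OFF, so nothing ON further left
print(bv.find_T_bit_to_left_of(3))  # -1: bpos is already the leftmost position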
| 184 | 215 |
class BitVector:
"""
This class uses an int called dec_rep as a vector of self.len many bits,
where self.len <= self.max_len. The class wraps some common bitwise
operations, and some less common ones too (like Gray coding that is
needed by Qubiter). In some cases, the bitwise manipulation might be
more succinct than the corresponding function in this wrapper, but the
wrapper function's name spells out in words what is wanted.
Attributes
----------
dec_rep : int
decimal representation, the int whose binary representation carries
a bit vector of length self.len
len : int
the length of the bit vector
max_len : int
maximum self.len allowed
"""
def __init__(self, length, dec_rep):
"""
Constructor
Parameters
----------
length : int
dec_rep : int
Returns
-------
"""
self.len = length
self.dec_rep = dec_rep
self.max_len = 16
assert length <= self.max_len, "bit vector is too long"
assert length > 0, "bit vector len must be >=1"
@staticmethod
def copy(bvec):
"""
Copy constructor, returns a new BitVector which is a copy of the
BitVector bvec.
Parameters
----------
bvec : BitVector
Returns
-------
BitVector
"""
return BitVector(bvec.len, bvec.dec_rep)
def bit_is_T(self, bpos):
"""
Returns True iff bit at position bpos is 1 (True)
Parameters
----------
bpos : int
bit position
Returns
-------
bool
"""
assert bpos < self.len, "bit position is too large"
mask = (1 << bpos)
return (self.dec_rep & mask) == mask
def set_bit_T(self, bpos):
"""
Sets to 1 (True) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep |= (1 << bpos)
def set_bit_F(self, bpos):
"""
Sets to 0 (False) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep &= ~(1 << bpos)
def set_all_bits_T(self):
"""
Sets to 1 (True) the bits of self at position bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
        # example: len = 3, dec_rep becomes 7 = 0b111
        self.dec_rep = (1 << self.len) - 1
def set_all_bits_F(self):
"""
Sets to 0 (False) the bits of self at positions bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
self.dec_rep = 0
def get_num_T_bits(self):
"""
Returns the number of 1 (True) bits at positions bpos
from 0 to len-1 inclusive.
Returns
-------
int
"""
count = 0
for bpos in range(self.len):
if self.bit_is_T(bpos):
count += 1
return count
def find_T_bit_to_right_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the right of
position bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos <= 0:
return -1
right_T_bit = bpos
mask = (1 << right_T_bit)
found_it = False
while True:
right_T_bit -= 1
mask >>= 1
found_it = ((self.dec_rep & mask) == mask)
if right_T_bit == 0 or found_it:
break
if found_it:
return right_T_bit
else:
return -1
def find_T_bit_to_left_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the left of position
bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos >= self.len-1:
return -1
left_T_bit = bpos
mask = (1 << left_T_bit)
found_it = False
while True:
left_T_bit += 1
mask <<= 1
found_it = ((self.dec_rep & mask) == mask)
if left_T_bit == self.len-1 or found_it:
break
if found_it:
return left_T_bit
else:
return -1
def find_leftmost_T_bit(self):
"""
        Out of all 1 (True) bits, returns the position of the leftmost one.
        Returns -1 if there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(self.len-1):
return self.len-1
else:
return self.find_T_bit_to_right_of(self.len - 1)
def find_rightmost_T_bit(self):
"""
        Out of all 1 (True) bits, returns the position of the rightmost one.
        Returns -1 if there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(0):
return 0
else:
return self.find_T_bit_to_left_of(0)
def get_bit_string(self):
"""
Returns self represented as string of length self.len of ones and
zeros. If bit_str is the output, [int(x) for x in bit_str] will turn
result to list of ints.
Returns
-------
str
"""
bit_str = ''
for beta in range(self.len-1, -1, -1):
if self.bit_is_T(beta):
bit_str += '1'
else:
bit_str += '0'
return bit_str
def __str__(self):
"""
Readable representation of self
Returns
-------
str
"""
return self.get_bit_string() + '=' + str(self.dec_rep)
@staticmethod
def new_with_T_on_diff(bvec1, bvec2):
"""
        Given two BitVectors bvec1 and bvec2, this returns a BitVector which
is a bitwise xor (mod 2 sum) of the bits of bvec1 and bvec2.
Parameters
----------
bvec1 : BitVector
bvec2 : BitVector
Returns
-------
BitVector
"""
assert bvec1.len == bvec2.len
return BitVector(bvec1.len, bvec1.dec_rep ^ bvec2.dec_rep)
@staticmethod
def get_lazy_from_normal(bit_len, normal):
"""
Throughout Qubiter, we will often refer to "Gray Code" as "lazy
ordering". In lazy ordering with bit_len many bits, one gives a
sequence of bit vectors of length bit_len, so that two adjacent
items of the sequence differ by just one bit. For example 000=0,
100=4, 110=6, 010=2, 011=3, 111=7, 101=5, 001=1. Each element of
this sequence represented as an int will be called lazy, and each
int in the sequence 0, 1, 2, 3, 4, 5, 6, 7 will be called normal.
Normal ordering is usually called dictionary ordering. Normal and
lazy sequences both start at 0.
Suppose bit_len = 3. The lazy sequence 000, 100, 110, 010, 011, 111,
101, 001 is easily obtained from the "standard" lazy sequence 000,
001, 011, 010, 110, 111, 101, 100 by "reflecting" each sequence
term. We will use the second sequence because it is more common in
the literature.
References
----------
        1. Martin Gardner, "Knotted Doughnuts and Other
        Mathematical Entertainments", chapt. 2, "The Binary Gray Code"
        2. "Numerical Recipes in C"
3. Many books on Discrete Mathematics for CompSci types
4. On the web, in Eric's Treasure Trove/Math/Gray Codes
Parameters
----------
bit_len : int
normal : int
Function returns the lazy int that corresponds to this normal int.
Returns
-------
int
"""
lazy_bvec = BitVector(bit_len, normal)
lazy = lazy_bvec.dec_rep
if bit_len > 1:
for m in range(bit_len-2, -1, -1):
# Look at bpos = m+1, if it's ON, then flip bpos=m.
# Remember that ^ is same as a mod2 sum.
lazy ^= (((normal >> m+1) & 1) << m)
return lazy
@staticmethod
def lazy_advance(old_normal, old_lazy):
"""
This method takes int "old_lazy" (which corresponds to bit vector
"old_normal"), and changes it to the next lazy int, "new_lazy" (
which corresponds to "new_normal").
example:
lazy sequence: 000, 001, 011, 010, 110, 111, 101, 100
old_lazy = 011
old_normal = 2 = 010
new_normal = 3 = 011
mask = (new_normal & ~old_normal) = 011 & 101 = 001
new_lazy = new_normal ^ mask = 011 ^ 001 = 010
Parameters
----------
old_normal : int
old_lazy : int
Returns
-------
int, int
"""
new_normal = old_normal + 1
new_lazy = old_lazy ^ (new_normal & ~(new_normal-1))
return new_normal, new_lazy
if __name__ == "__main__":
def main():
for length in [3, 4]:
print('\nlength=', length)
print('normal, lazy, lazy in binary:')
max_normal = (1 << length) - 1
normal = 0
lazy = 0
while normal <= max_normal:
lazy_bvec = BitVector(length, lazy)
print(normal, lazy, BitVector.get_bit_string(lazy_bvec))
normal, lazy = BitVector.lazy_advance(normal, lazy)
main()
|
get_bit_string
|
Returns self represented as string of length self.len of ones and
zeros. If bit_str is the output, [int(x) for x in bit_str] will turn
result to list of ints.
Returns
-------
str
|
class BitVector:
"""
This class uses an int called dec_rep as a vector of self.len many bits,
where self.len <= self.max_len. The class wraps some common bitwise
operations, and some less common ones too (like Gray coding that is
needed by Qubiter). In some cases, the bitwise manipulation might be
more succinct than the corresponding function in this wrapper, but the
wrapper function's name spells out in words what is wanted.
Attributes
----------
dec_rep : int
decimal representation, the int whose binary representation carries
a bit vector of length self.len
len : int
the length of the bit vector
max_len : int
maximum self.len allowed
"""
def __init__(self, length, dec_rep):
"""
Constructor
Parameters
----------
length : int
dec_rep : int
Returns
-------
"""
self.len = length
self.dec_rep = dec_rep
self.max_len = 16
assert length <= self.max_len, "bit vector is too long"
assert length > 0, "bit vector len must be >=1"
@staticmethod
def copy(bvec):
"""
Copy constructor, returns a new BitVector which is a copy of the
BitVector bvec.
Parameters
----------
bvec : BitVector
Returns
-------
BitVector
"""
return BitVector(bvec.len, bvec.dec_rep)
def bit_is_T(self, bpos):
"""
Returns True iff bit at position bpos is 1 (True)
Parameters
----------
bpos : int
bit position
Returns
-------
bool
"""
assert bpos < self.len, "bit position is too large"
mask = (1 << bpos)
return (self.dec_rep & mask) == mask
def set_bit_T(self, bpos):
"""
Sets to 1 (True) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep |= (1 << bpos)
def set_bit_F(self, bpos):
"""
Sets to 0 (False) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep &= ~(1 << bpos)
def set_all_bits_T(self):
"""
Sets to 1 (True) the bits of self at position bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
        # example: len = 3, dec_rep becomes 7 = 0b111
        self.dec_rep = (1 << self.len) - 1
def set_all_bits_F(self):
"""
Sets to 0 (False) the bits of self at positions bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
self.dec_rep = 0
def get_num_T_bits(self):
"""
Returns the number of 1 (True) bits at positions bpos
from 0 to len-1 inclusive.
Returns
-------
int
"""
count = 0
for bpos in range(self.len):
if self.bit_is_T(bpos):
count += 1
return count
def find_T_bit_to_right_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the right of
position bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos <= 0:
return -1
right_T_bit = bpos
mask = (1 << right_T_bit)
found_it = False
while True:
right_T_bit -= 1
mask >>= 1
found_it = ((self.dec_rep & mask) == mask)
if right_T_bit == 0 or found_it:
break
if found_it:
return right_T_bit
else:
return -1
def find_T_bit_to_left_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the left of position
bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos >= self.len-1:
return -1
left_T_bit = bpos
mask = (1 << left_T_bit)
found_it = False
while True:
left_T_bit += 1
mask <<= 1
found_it = ((self.dec_rep & mask) == mask)
if left_T_bit == self.len-1 or found_it:
break
if found_it:
return left_T_bit
else:
return -1
def find_leftmost_T_bit(self):
"""
        Out of all 1 (True) bits, returns the position of the leftmost one.
        Returns -1 if there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(self.len-1):
return self.len-1
else:
return self.find_T_bit_to_right_of(self.len - 1)
def find_rightmost_T_bit(self):
"""
        Out of all 1 (True) bits, returns the position of the rightmost one.
        Returns -1 if there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(0):
return 0
else:
return self.find_T_bit_to_left_of(0)
# MASKED: get_bit_string function (lines 249-266)
def __str__(self):
"""
Readable representation of self
Returns
-------
str
"""
return self.get_bit_string() + '=' + str(self.dec_rep)
@staticmethod
def new_with_T_on_diff(bvec1, bvec2):
"""
        Given two BitVectors bvec1 and bvec2, this returns a BitVector which
is a bitwise xor (mod 2 sum) of the bits of bvec1 and bvec2.
Parameters
----------
bvec1 : BitVector
bvec2 : BitVector
Returns
-------
BitVector
"""
assert bvec1.len == bvec2.len
return BitVector(bvec1.len, bvec1.dec_rep ^ bvec2.dec_rep)
@staticmethod
def get_lazy_from_normal(bit_len, normal):
"""
Throughout Qubiter, we will often refer to "Gray Code" as "lazy
ordering". In lazy ordering with bit_len many bits, one gives a
sequence of bit vectors of length bit_len, so that two adjacent
items of the sequence differ by just one bit. For example 000=0,
100=4, 110=6, 010=2, 011=3, 111=7, 101=5, 001=1. Each element of
this sequence represented as an int will be called lazy, and each
int in the sequence 0, 1, 2, 3, 4, 5, 6, 7 will be called normal.
Normal ordering is usually called dictionary ordering. Normal and
lazy sequences both start at 0.
Suppose bit_len = 3. The lazy sequence 000, 100, 110, 010, 011, 111,
101, 001 is easily obtained from the "standard" lazy sequence 000,
001, 011, 010, 110, 111, 101, 100 by "reflecting" each sequence
term. We will use the second sequence because it is more common in
the literature.
References
----------
        1. Martin Gardner, "Knotted Doughnuts and Other
        Mathematical Entertainments", chapt. 2, "The Binary Gray Code"
        2. "Numerical Recipes in C"
3. Many books on Discrete Mathematics for CompSci types
4. On the web, in Eric's Treasure Trove/Math/Gray Codes
Parameters
----------
bit_len : int
normal : int
Function returns the lazy int that corresponds to this normal int.
Returns
-------
int
"""
lazy_bvec = BitVector(bit_len, normal)
lazy = lazy_bvec.dec_rep
if bit_len > 1:
for m in range(bit_len-2, -1, -1):
# Look at bpos = m+1, if it's ON, then flip bpos=m.
# Remember that ^ is same as a mod2 sum.
lazy ^= (((normal >> m+1) & 1) << m)
return lazy
@staticmethod
def lazy_advance(old_normal, old_lazy):
"""
This method takes int "old_lazy" (which corresponds to bit vector
"old_normal"), and changes it to the next lazy int, "new_lazy" (
which corresponds to "new_normal").
example:
lazy sequence: 000, 001, 011, 010, 110, 111, 101, 100
old_lazy = 011
old_normal = 2 = 010
new_normal = 3 = 011
mask = (new_normal & ~old_normal) = 011 & 101 = 001
new_lazy = new_normal ^ mask = 011 ^ 001 = 010
Parameters
----------
old_normal : int
old_lazy : int
Returns
-------
int, int
"""
new_normal = old_normal + 1
new_lazy = old_lazy ^ (new_normal & ~(new_normal-1))
return new_normal, new_lazy
if __name__ == "__main__":
def main():
for length in [3, 4]:
print('\nlength=', length)
print('normal, lazy, lazy in binary:')
max_normal = (1 << length) - 1
normal = 0
lazy = 0
while normal <= max_normal:
lazy_bvec = BitVector(length, lazy)
print(normal, lazy, BitVector.get_bit_string(lazy_bvec))
normal, lazy = BitVector.lazy_advance(normal, lazy)
main()
|
def get_bit_string(self):
"""
Returns self represented as string of length self.len of ones and
zeros. If bit_str is the output, [int(x) for x in bit_str] will turn
result to list of ints.
Returns
-------
str
"""
bit_str = ''
for beta in range(self.len-1, -1, -1):
if self.bit_is_T(beta):
bit_str += '1'
else:
bit_str += '0'
return bit_str
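    # Added example (not in the original source), illustrating the round trip
    # described in the docstring above:
    #     bv = BitVector(4, 13)                  # 13 = 0b1101
    #     bv.get_bit_string()                    # -> '1101'
    #     [int(x) for x in bv.get_bit_string()]  # -> [1, 1, 0, 1]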
| 249 | 266 |
class BitVector:
"""
This class uses an int called dec_rep as a vector of self.len many bits,
where self.len <= self.max_len. The class wraps some common bitwise
operations, and some less common ones too (like Gray coding that is
needed by Qubiter). In some cases, the bitwise manipulation might be
more succinct than the corresponding function in this wrapper, but the
wrapper function's name spells out in words what is wanted.
Attributes
----------
dec_rep : int
decimal representation, the int whose binary representation carries
a bit vector of length self.len
len : int
the length of the bit vector
max_len : int
maximum self.len allowed
"""
def __init__(self, length, dec_rep):
"""
Constructor
Parameters
----------
length : int
dec_rep : int
Returns
-------
"""
self.len = length
self.dec_rep = dec_rep
self.max_len = 16
assert length <= self.max_len, "bit vector is too long"
assert length > 0, "bit vector len must be >=1"
@staticmethod
def copy(bvec):
"""
Copy constructor, returns a new BitVector which is a copy of the
BitVector bvec.
Parameters
----------
bvec : BitVector
Returns
-------
BitVector
"""
return BitVector(bvec.len, bvec.dec_rep)
def bit_is_T(self, bpos):
"""
Returns True iff bit at position bpos is 1 (True)
Parameters
----------
bpos : int
bit position
Returns
-------
bool
"""
assert bpos < self.len, "bit position is too large"
mask = (1 << bpos)
return (self.dec_rep & mask) == mask
def set_bit_T(self, bpos):
"""
Sets to 1 (True) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep |= (1 << bpos)
def set_bit_F(self, bpos):
"""
Sets to 0 (False) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep &= ~(1 << bpos)
def set_all_bits_T(self):
"""
Sets to 1 (True) the bits of self at position bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
        # example: len = 3, dec_rep becomes 7 = 0b111
        self.dec_rep = (1 << self.len) - 1
def set_all_bits_F(self):
"""
Sets to 0 (False) the bits of self at positions bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
self.dec_rep = 0
def get_num_T_bits(self):
"""
Returns the number of 1 (True) bits at positions bpos
from 0 to len-1 inclusive.
Returns
-------
int
"""
count = 0
for bpos in range(self.len):
if self.bit_is_T(bpos):
count += 1
return count
def find_T_bit_to_right_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the right of
position bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos <= 0:
return -1
right_T_bit = bpos
mask = (1 << right_T_bit)
found_it = False
while True:
right_T_bit -= 1
mask >>= 1
found_it = ((self.dec_rep & mask) == mask)
if right_T_bit == 0 or found_it:
break
if found_it:
return right_T_bit
else:
return -1
def find_T_bit_to_left_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the left of position
bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos >= self.len-1:
return -1
left_T_bit = bpos
mask = (1 << left_T_bit)
found_it = False
while True:
left_T_bit += 1
mask <<= 1
found_it = ((self.dec_rep & mask) == mask)
if left_T_bit == self.len-1 or found_it:
break
if found_it:
return left_T_bit
else:
return -1
def find_leftmost_T_bit(self):
"""
        Out of all 1 (True) bits, returns the position of the leftmost one.
        Returns -1 if there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(self.len-1):
return self.len-1
else:
return self.find_T_bit_to_right_of(self.len - 1)
def find_rightmost_T_bit(self):
"""
        Out of all 1 (True) bits, returns the position of the rightmost one.
        Returns -1 if there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(0):
return 0
else:
return self.find_T_bit_to_left_of(0)
def get_bit_string(self):
"""
Returns self represented as string of length self.len of ones and
zeros. If bit_str is the output, [int(x) for x in bit_str] will turn
result to list of ints.
Returns
-------
str
"""
bit_str = ''
for beta in range(self.len-1, -1, -1):
if self.bit_is_T(beta):
bit_str += '1'
else:
bit_str += '0'
return bit_str
def __str__(self):
"""
Readable representation of self
Returns
-------
str
"""
return self.get_bit_string() + '=' + str(self.dec_rep)
@staticmethod
def new_with_T_on_diff(bvec1, bvec2):
"""
        Given two BitVectors bvec1 and bvec2, this returns a BitVector which
is a bitwise xor (mod 2 sum) of the bits of bvec1 and bvec2.
Parameters
----------
bvec1 : BitVector
bvec2 : BitVector
Returns
-------
BitVector
"""
assert bvec1.len == bvec2.len
return BitVector(bvec1.len, bvec1.dec_rep ^ bvec2.dec_rep)
@staticmethod
def get_lazy_from_normal(bit_len, normal):
"""
Throughout Qubiter, we will often refer to "Gray Code" as "lazy
ordering". In lazy ordering with bit_len many bits, one gives a
sequence of bit vectors of length bit_len, so that two adjacent
items of the sequence differ by just one bit. For example 000=0,
100=4, 110=6, 010=2, 011=3, 111=7, 101=5, 001=1. Each element of
this sequence represented as an int will be called lazy, and each
int in the sequence 0, 1, 2, 3, 4, 5, 6, 7 will be called normal.
Normal ordering is usually called dictionary ordering. Normal and
lazy sequences both start at 0.
Suppose bit_len = 3. The lazy sequence 000, 100, 110, 010, 011, 111,
101, 001 is easily obtained from the "standard" lazy sequence 000,
001, 011, 010, 110, 111, 101, 100 by "reflecting" each sequence
term. We will use the second sequence because it is more common in
the literature.
References
----------
        1. Martin Gardner, "Knotted Doughnuts and Other
        Mathematical Entertainments", chapt. 2, "The Binary Gray Code"
        2. "Numerical Recipes in C"
3. Many books on Discrete Mathematics for CompSci types
4. On the web, in Eric's Treasure Trove/Math/Gray Codes
Parameters
----------
bit_len : int
normal : int
Function returns the lazy int that corresponds to this normal int.
Returns
-------
int
"""
lazy_bvec = BitVector(bit_len, normal)
lazy = lazy_bvec.dec_rep
if bit_len > 1:
for m in range(bit_len-2, -1, -1):
# Look at bpos = m+1, if it's ON, then flip bpos=m.
# Remember that ^ is same as a mod2 sum.
lazy ^= (((normal >> m+1) & 1) << m)
return lazy
@staticmethod
def lazy_advance(old_normal, old_lazy):
"""
This method takes int "old_lazy" (which corresponds to bit vector
"old_normal"), and changes it to the next lazy int, "new_lazy" (
which corresponds to "new_normal").
example:
lazy sequence: 000, 001, 011, 010, 110, 111, 101, 100
old_lazy = 011
old_normal = 2 = 010
new_normal = 3 = 011
mask = (new_normal & ~old_normal) = 011 & 101 = 001
new_lazy = new_normal ^ mask = 011 ^ 001 = 010
Parameters
----------
old_normal : int
old_lazy : int
Returns
-------
int, int
"""
new_normal = old_normal + 1
new_lazy = old_lazy ^ (new_normal & ~(new_normal-1))
return new_normal, new_lazy
if __name__ == "__main__":
def main():
for length in [3, 4]:
print('\nlength=', length)
print('normal, lazy, lazy in binary:')
max_normal = (1 << length) - 1
normal = 0
lazy = 0
while normal <= max_normal:
lazy_bvec = BitVector(length, lazy)
print(normal, lazy, BitVector.get_bit_string(lazy_bvec))
normal, lazy = BitVector.lazy_advance(normal, lazy)
main()
|
lazy_advance
|
This method takes int "old_lazy" (which corresponds to bit vector
"old_normal"), and changes it to the next lazy int, "new_lazy" (
which corresponds to "new_normal").
example:
lazy sequence: 000, 001, 011, 010, 110, 111, 101, 100
old_lazy = 011
old_normal = 2 = 010
new_normal = 3 = 011
mask = (new_normal & ~old_normal) = 011 & 101 = 001
new_lazy = new_normal ^ mask = 011 ^ 001 = 010
Parameters
----------
old_normal : int
old_lazy : int
Returns
-------
int, int
|
class BitVector:
"""
This class uses an int called dec_rep as a vector of self.len many bits,
where self.len <= self.max_len. The class wraps some common bitwise
operations, and some less common ones too (like Gray coding that is
needed by Qubiter). In some cases, the bitwise manipulation might be
more succinct than the corresponding function in this wrapper, but the
wrapper function's name spells out in words what is wanted.
Attributes
----------
dec_rep : int
decimal representation, the int whose binary representation carries
a bit vector of length self.len
len : int
the length of the bit vector
max_len : int
maximum self.len allowed
"""
def __init__(self, length, dec_rep):
"""
Constructor
Parameters
----------
length : int
dec_rep : int
Returns
-------
"""
self.len = length
self.dec_rep = dec_rep
self.max_len = 16
assert length <= self.max_len, "bit vector is too long"
assert length > 0, "bit vector len must be >=1"
@staticmethod
def copy(bvec):
"""
Copy constructor, returns a new BitVector which is a copy of the
BitVector bvec.
Parameters
----------
bvec : BitVector
Returns
-------
BitVector
"""
return BitVector(bvec.len, bvec.dec_rep)
def bit_is_T(self, bpos):
"""
Returns True iff bit at position bpos is 1 (True)
Parameters
----------
bpos : int
bit position
Returns
-------
bool
"""
assert bpos < self.len, "bit position is too large"
mask = (1 << bpos)
return (self.dec_rep & mask) == mask
def set_bit_T(self, bpos):
"""
Sets to 1 (True) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep |= (1 << bpos)
def set_bit_F(self, bpos):
"""
Sets to 0 (False) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep &= ~(1 << bpos)
def set_all_bits_T(self):
"""
Sets to 1 (True) the bits of self at position bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
        # example: len = 3, dec_rep becomes 7 = 0b111
        self.dec_rep = (1 << self.len) - 1
def set_all_bits_F(self):
"""
Sets to 0 (False) the bits of self at positions bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
self.dec_rep = 0
def get_num_T_bits(self):
"""
Returns the number of 1 (True) bits at positions bpos
from 0 to len-1 inclusive.
Returns
-------
int
"""
count = 0
for bpos in range(self.len):
if self.bit_is_T(bpos):
count += 1
return count
def find_T_bit_to_right_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the right of
position bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos <= 0:
return -1
right_T_bit = bpos
mask = (1 << right_T_bit)
found_it = False
while True:
right_T_bit -= 1
mask >>= 1
found_it = ((self.dec_rep & mask) == mask)
if right_T_bit == 0 or found_it:
break
if found_it:
return right_T_bit
else:
return -1
def find_T_bit_to_left_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the left of position
bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos >= self.len-1:
return -1
left_T_bit = bpos
mask = (1 << left_T_bit)
found_it = False
while True:
left_T_bit += 1
mask <<= 1
found_it = ((self.dec_rep & mask) == mask)
if left_T_bit == self.len-1 or found_it:
break
if found_it:
return left_T_bit
else:
return -1
def find_leftmost_T_bit(self):
"""
        Out of all 1 (True) bits, returns the position of the leftmost one.
        Returns -1 if there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(self.len-1):
return self.len-1
else:
return self.find_T_bit_to_right_of(self.len - 1)
def find_rightmost_T_bit(self):
"""
        Out of all 1 (True) bits, returns the position of the rightmost one.
        Returns -1 if there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(0):
return 0
else:
return self.find_T_bit_to_left_of(0)
def get_bit_string(self):
"""
Returns self represented as string of length self.len of ones and
zeros. If bit_str is the output, [int(x) for x in bit_str] will turn
result to list of ints.
Returns
-------
str
"""
bit_str = ''
for beta in range(self.len-1, -1, -1):
if self.bit_is_T(beta):
bit_str += '1'
else:
bit_str += '0'
return bit_str
def __str__(self):
"""
Readable representation of self
Returns
-------
str
"""
return self.get_bit_string() + '=' + str(self.dec_rep)
@staticmethod
def new_with_T_on_diff(bvec1, bvec2):
"""
        Given two BitVectors bvec1 and bvec2, this returns a BitVector which
is a bitwise xor (mod 2 sum) of the bits of bvec1 and bvec2.
Parameters
----------
bvec1 : BitVector
bvec2 : BitVector
Returns
-------
BitVector
"""
assert bvec1.len == bvec2.len
return BitVector(bvec1.len, bvec1.dec_rep ^ bvec2.dec_rep)
@staticmethod
def get_lazy_from_normal(bit_len, normal):
"""
Throughout Qubiter, we will often refer to "Gray Code" as "lazy
ordering". In lazy ordering with bit_len many bits, one gives a
sequence of bit vectors of length bit_len, so that two adjacent
items of the sequence differ by just one bit. For example 000=0,
100=4, 110=6, 010=2, 011=3, 111=7, 101=5, 001=1. Each element of
this sequence represented as an int will be called lazy, and each
int in the sequence 0, 1, 2, 3, 4, 5, 6, 7 will be called normal.
Normal ordering is usually called dictionary ordering. Normal and
lazy sequences both start at 0.
Suppose bit_len = 3. The lazy sequence 000, 100, 110, 010, 011, 111,
101, 001 is easily obtained from the "standard" lazy sequence 000,
001, 011, 010, 110, 111, 101, 100 by "reflecting" each sequence
term. We will use the second sequence because it is more common in
the literature.
References
----------
        1. Martin Gardner, "Knotted Doughnuts and Other
        Mathematical Entertainments", chapt. 2, "The Binary Gray Code"
        2. "Numerical Recipes in C"
3. Many books on Discrete Mathematics for CompSci types
4. On the web, in Eric's Treasure Trove/Math/Gray Codes
Parameters
----------
bit_len : int
normal : int
Function returns the lazy int that corresponds to this normal int.
Returns
-------
int
"""
lazy_bvec = BitVector(bit_len, normal)
lazy = lazy_bvec.dec_rep
if bit_len > 1:
for m in range(bit_len-2, -1, -1):
# Look at bpos = m+1, if it's ON, then flip bpos=m.
# Remember that ^ is same as a mod2 sum.
lazy ^= (((normal >> m+1) & 1) << m)
return lazy
# MASKED: lazy_advance function (lines 346-377)
if __name__ == "__main__":
def main():
for length in [3, 4]:
print('\nlength=', length)
print('normal, lazy, lazy in binary:')
max_normal = (1 << length) - 1
normal = 0
lazy = 0
while normal <= max_normal:
lazy_bvec = BitVector(length, lazy)
print(normal, lazy, BitVector.get_bit_string(lazy_bvec))
normal, lazy = BitVector.lazy_advance(normal, lazy)
main()
|
@staticmethod
def lazy_advance(old_normal, old_lazy):
"""
This method takes int "old_lazy" (which corresponds to bit vector
"old_normal"), and changes it to the next lazy int, "new_lazy" (
which corresponds to "new_normal").
example:
lazy sequence: 000, 001, 011, 010, 110, 111, 101, 100
old_lazy = 011
old_normal = 2 = 010
new_normal = 3 = 011
mask = (new_normal & ~old_normal) = 011 & 101 = 001
new_lazy = new_normal ^ mask = 011 ^ 001 = 010
Parameters
----------
old_normal : int
old_lazy : int
Returns
-------
int, int
"""
new_normal = old_normal + 1
new_lazy = old_lazy ^ (new_normal & ~(new_normal-1))
return new_normal, new_lazy
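    # Added example (not in the original source): starting from (0, 0) and
    # applying lazy_advance repeatedly walks the 3-bit lazy (Gray) sequence,
    # flipping exactly one bit per step:
    #     normal, lazy = 0, 0
    #     seq = [lazy]
    #     for _ in range(7):
    #         normal, lazy = BitVector.lazy_advance(normal, lazy)
    #         seq.append(lazy)
    #     # seq == [0, 1, 3, 2, 6, 7, 5, 4]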
| 346 | 377 |
class BitVector:
"""
This class uses an int called dec_rep as a vector of self.len many bits,
where self.len <= self.max_len. The class wraps some common bitwise
operations, and some less common ones too (like Gray coding that is
needed by Qubiter). In some cases, the bitwise manipulation might be
more succinct than the corresponding function in this wrapper, but the
wrapper function's name spells out in words what is wanted.
Attributes
----------
dec_rep : int
decimal representation, the int whose binary representation carries
a bit vector of length self.len
len : int
the length of the bit vector
max_len : int
maximum self.len allowed
"""
def __init__(self, length, dec_rep):
"""
Constructor
Parameters
----------
length : int
dec_rep : int
Returns
-------
"""
self.len = length
self.dec_rep = dec_rep
self.max_len = 16
assert length <= self.max_len, "bit vector is too long"
assert length > 0, "bit vector len must be >=1"
@staticmethod
def copy(bvec):
"""
Copy constructor, returns a new BitVector which is a copy of the
BitVector bvec.
Parameters
----------
bvec : BitVector
Returns
-------
BitVector
"""
return BitVector(bvec.len, bvec.dec_rep)
def bit_is_T(self, bpos):
"""
Returns True iff bit at position bpos is 1 (True)
Parameters
----------
bpos : int
bit position
Returns
-------
bool
"""
assert bpos < self.len, "bit position is too large"
mask = (1 << bpos)
return (self.dec_rep & mask) == mask
def set_bit_T(self, bpos):
"""
Sets to 1 (True) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep |= (1 << bpos)
def set_bit_F(self, bpos):
"""
Sets to 0 (False) the bit of self at position bpos.
Parameters
----------
bpos : int
bit position
Returns
-------
None
"""
assert bpos < self.len, "bit position is too large"
self.dec_rep &= ~(1 << bpos)
def set_all_bits_T(self):
"""
Sets to 1 (True) the bits of self at position bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
        # example: len = 3, dec_rep becomes 7 = 0b111
        self.dec_rep = (1 << self.len) - 1
def set_all_bits_F(self):
"""
Sets to 0 (False) the bits of self at positions bpos
from 0 to len-1 inclusive.
Returns
-------
None
"""
self.dec_rep = 0
def get_num_T_bits(self):
"""
Returns the number of 1 (True) bits at positions bpos
from 0 to len-1 inclusive.
Returns
-------
int
"""
count = 0
for bpos in range(self.len):
if self.bit_is_T(bpos):
count += 1
return count
def find_T_bit_to_right_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the right of
position bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos <= 0:
return -1
right_T_bit = bpos
mask = (1 << right_T_bit)
found_it = False
while True:
right_T_bit -= 1
mask >>= 1
found_it = ((self.dec_rep & mask) == mask)
if right_T_bit == 0 or found_it:
break
if found_it:
return right_T_bit
else:
return -1
def find_T_bit_to_left_of(self, bpos):
"""
Returns position of 1 (True) bit immediately to the left of position
bpos. Returns -1 if there is no such bit.
Parameters
----------
bpos : int
bit position
Returns
-------
int
"""
if bpos >= self.len-1:
return -1
left_T_bit = bpos
mask = (1 << left_T_bit)
found_it = False
while True:
left_T_bit += 1
mask <<= 1
found_it = ((self.dec_rep & mask) == mask)
if left_T_bit == self.len-1 or found_it:
break
if found_it:
return left_T_bit
else:
return -1
def find_leftmost_T_bit(self):
"""
        Out of all 1 (True) bits, returns the position of the leftmost one.
        Returns -1 if there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(self.len-1):
return self.len-1
else:
return self.find_T_bit_to_right_of(self.len - 1)
def find_rightmost_T_bit(self):
"""
        Out of all 1 (True) bits, returns the position of the rightmost one.
        Returns -1 if there is no such bit.
Returns
-------
int
"""
if self.bit_is_T(0):
return 0
else:
return self.find_T_bit_to_left_of(0)
def get_bit_string(self):
"""
Returns self represented as string of length self.len of ones and
zeros. If bit_str is the output, [int(x) for x in bit_str] will turn
result to list of ints.
Returns
-------
str
"""
bit_str = ''
for beta in range(self.len-1, -1, -1):
if self.bit_is_T(beta):
bit_str += '1'
else:
bit_str += '0'
return bit_str
def __str__(self):
"""
Readable representation of self
Returns
-------
str
"""
return self.get_bit_string() + '=' + str(self.dec_rep)
@staticmethod
def new_with_T_on_diff(bvec1, bvec2):
"""
        Given two BitVectors bvec1 and bvec2, this returns a BitVector which
is a bitwise xor (mod 2 sum) of the bits of bvec1 and bvec2.
Parameters
----------
bvec1 : BitVector
bvec2 : BitVector
Returns
-------
BitVector
"""
assert bvec1.len == bvec2.len
return BitVector(bvec1.len, bvec1.dec_rep ^ bvec2.dec_rep)
@staticmethod
def get_lazy_from_normal(bit_len, normal):
"""
Throughout Qubiter, we will often refer to "Gray Code" as "lazy
ordering". In lazy ordering with bit_len many bits, one gives a
sequence of bit vectors of length bit_len, so that two adjacent
items of the sequence differ by just one bit. For example 000=0,
100=4, 110=6, 010=2, 011=3, 111=7, 101=5, 001=1. Each element of
this sequence represented as an int will be called lazy, and each
int in the sequence 0, 1, 2, 3, 4, 5, 6, 7 will be called normal.
Normal ordering is usually called dictionary ordering. Normal and
lazy sequences both start at 0.
Suppose bit_len = 3. The lazy sequence 000, 100, 110, 010, 011, 111,
101, 001 is easily obtained from the "standard" lazy sequence 000,
001, 011, 010, 110, 111, 101, 100 by "reflecting" each sequence
term. We will use the second sequence because it is more common in
the literature.
References
----------
        1. Martin Gardner, "Knotted Doughnuts and Other
        Mathematical Entertainments", chapt. 2, "The Binary Gray Code"
        2. "Numerical Recipes in C"
3. Many books on Discrete Mathematics for CompSci types
4. On the web, in Eric's Treasure Trove/Math/Gray Codes
Parameters
----------
bit_len : int
normal : int
Function returns the lazy int that corresponds to this normal int.
Returns
-------
int
"""
lazy_bvec = BitVector(bit_len, normal)
lazy = lazy_bvec.dec_rep
if bit_len > 1:
for m in range(bit_len-2, -1, -1):
# Look at bpos = m+1, if it's ON, then flip bpos=m.
# Remember that ^ is same as a mod2 sum.
lazy ^= (((normal >> m+1) & 1) << m)
return lazy
@staticmethod
def lazy_advance(old_normal, old_lazy):
"""
This method takes int "old_lazy" (which corresponds to bit vector
"old_normal"), and changes it to the next lazy int, "new_lazy" (
which corresponds to "new_normal").
example:
lazy sequence: 000, 001, 011, 010, 110, 111, 101, 100
old_lazy = 011
old_normal = 2 = 010
new_normal = 3 = 011
mask = (new_normal & ~old_normal) = 011 & 101 = 001
new_lazy = new_normal ^ mask = 011 ^ 001 = 010
Parameters
----------
old_normal : int
old_lazy : int
Returns
-------
int, int
"""
new_normal = old_normal + 1
new_lazy = old_lazy ^ (new_normal & ~(new_normal-1))
return new_normal, new_lazy
if __name__ == "__main__":
def main():
for length in [3, 4]:
print('\nlength=', length)
print('normal, lazy, lazy in binary:')
max_normal = (1 << length) - 1
normal = 0
lazy = 0
while normal <= max_normal:
lazy_bvec = BitVector(length, lazy)
print(normal, lazy, BitVector.get_bit_string(lazy_bvec))
normal, lazy = BitVector.lazy_advance(normal, lazy)
main()
|
__init__
|
Creates an instance of this class.
Arguments:
sol_dim (int): The dimensionality of the problem space
max_iters (int): The maximum number of iterations to perform during optimization
popsize (int): The number of candidate solutions to be sampled at every iteration
num_elites (int): The number of top solutions that will be used to obtain the distribution
at the next iteration.
upper_bound (np.array): An array of upper bounds
lower_bound (np.array): An array of lower bounds
epsilon (float): A minimum variance. If the maximum variance drops below epsilon, optimization is
stopped.
alpha (float): Controls how much of the previous mean and variance is used for the next iteration.
next_mean = alpha * old_mean + (1 - alpha) * elite_mean, and similarly for variance.
|
import os
import os.path as osp
import pickle
import random
from collections import deque
from datetime import datetime
import gym
import numpy as np
import scipy.stats as stats
import torch
import torch.optim as optim
from mpi4py import MPI
import dr
from dr.ppo.models import Policy, ValueNet
from dr.ppo.train import one_train_iter
from dr.ppo.utils import set_torch_num_threads, RunningMeanStd, traj_seg_gen
COMM = MPI.COMM_WORLD
import tensorboardX
def set_global_seeds(i):
torch.manual_seed(i)
np.random.seed(i)
random.seed(i)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(i)
class CEMOptimizer(object):
# MASKED: __init__ function (lines 35-65)
def reset(self):
pass
def obtain_solution(self, init_mean, init_var):
"""Optimizes the cost function using the provided initial candidate distribution
Arguments:
init_mean (np.ndarray): The mean of the initial candidate distribution.
init_var (np.ndarray): The variance of the initial candidate distribution.
"""
mean, var, t = init_mean, init_var, 0
X = stats.truncnorm(-2, 2, loc=np.zeros_like(mean), scale=np.ones_like(var))
costs_hist = []
mean_hist = []
var_hist = []
while (t < self.max_iters) and np.max(var) > self.epsilon:
lb_dist, ub_dist = mean - self.lb, self.ub - mean
constrained_var = np.minimum(np.minimum(np.square(lb_dist / 2), np.square(ub_dist / 2)), var)
samples = X.rvs(size=[self.popsize, self.sol_dim]) * np.sqrt(constrained_var) + mean
samples = samples.astype(np.float32)
costs = self.cost_function(samples, t)
elites = samples[np.argsort(costs)][:self.num_elites]
new_mean = np.mean(elites, axis=0)
new_var = np.var(elites, axis=0)
mean = self.alpha * mean + (1 - self.alpha) * new_mean
var = self.alpha * var + (1 - self.alpha) * new_var
for i, m in enumerate(mean):
self.writer.add_scalar(f'mean/{i}', m, t)
for i, m in enumerate(var):
self.writer.add_scalar(f'var/{i}', m, t)
self.writer.add_scalar('costs', np.min(costs), t)
t += 1
costs_hist.append(costs)
mean_hist.append(mean)
var_hist.append(var)
self.writer.close()
return dict(
mean_hist=mean_hist, costs_hist=costs_hist, var_hist=var_hist
)
class PPO_Pytorch(object):
def __init__(self, experiment_name, env_params, train_params, **kwargs):
self.experiment_name = experiment_name
self.env_params = env_params
self.train_params = train_params
self.log_dir = osp.join('runs',
f'seed_{str(train_params["seed"])}_{datetime.now().strftime("%b%d_%H-%M-%S")}')
os.makedirs(self.log_dir, exist_ok=True)
with open(osp.join(self.log_dir, 'env_params.pkl'), 'wb+') as f:
pickle.dump(env_params, f)
with open(osp.join(self.log_dir, 'train_params.pkl'), 'wb+') as f:
pickle.dump(train_params, f)
super().__init__()
def train(self, env_id, backend,
train_params, env_params,
means, stdevs):
# Unpack params
hid_size = train_params['hid_size']
pol_init_std = train_params['pol_init_std']
adam_epsilon = train_params['adam_epsilon']
optim_stepsize = train_params['optim_stepsize']
# Convert means and stdevs to dict format
assert len(means) == len(stdevs), (len(means), len(stdevs))
mean_dict, stdev_dict = PPO_Pytorch._vec_to_dict(env_id, means, stdevs)
# Set parameter of env
self.env_dist.default_parameters = mean_dict
self.env_dist.stdev_dict = stdev_dict
env = self.env_dist.root_env
set_torch_num_threads()
# Construct policy and value network
pol = Policy(env.observation_space, env.action_space, hid_size, pol_init_std)
pol_optim = optim.Adam(pol.parameters(), lr=optim_stepsize, eps=adam_epsilon)
val = ValueNet(env.observation_space, hid_size)
val_optim = optim.Adam(val.parameters(), lr=optim_stepsize, eps=adam_epsilon)
optims = {'pol_optim': pol_optim, 'val_optim': val_optim}
num_train_iter = int(train_params['num_timesteps'] / train_params['ts_per_batch'])
# Buffer for running statistics
eps_rets_buff = deque(maxlen=100)
eps_rets_mean_buff = []
state_running_m_std = RunningMeanStd(shape=env.observation_space.shape)
# seg_gen is a generator that yields the training data points
seg_gen = traj_seg_gen(self.env_dist, pol, val, state_running_m_std, env_params, train_params)
eval_perfs = []
for iter_i in range(num_train_iter):
one_train_iter(pol, val, optims,
iter_i, eps_rets_buff, eps_rets_mean_buff, seg_gen,
state_running_m_std, train_params, self.eval_envs, eval_perfs)
return eval_perfs
def run(self):
set_global_seeds(self.train_params['seed'])
# Unpack params
env_name = self.env_params['env_name']
backend = self.env_params['backend']
stdev = self.train_params['env_dist_stdev']
mean_scale = self.train_params['mean_scale']
seed = self.train_params['seed']
num_eval_env = self.train_params['num_eval_env']
collision_detector = self.env_params['collision_detector']
# Obtain the initial value for the simulation parameters
env_dist = dr.dist.Normal(env_name, backend, mean_scale=mean_scale)
init_mean_param = PPO_Pytorch._dict_to_vec(env_name, env_dist.default_parameters)
init_stdev_param = np.array([stdev] * len(init_mean_param), dtype=np.float32)
cem_init_mean = np.concatenate((init_mean_param, init_stdev_param))
cem_init_stdev = np.array([1.0] * len(cem_init_mean), dtype=np.float32)
# Make envs that will be reused for training and eval
self.env_dist = dr.dist.Normal(env_name, backend)
self.env_dist.backend.set_collision_detector(env_dist.root_env, collision_detector)
self.env_dist.seed(seed)
if env_name == 'Walker':
self.eval_envs = [gym.make('Walker2d-v2') for _ in range(num_eval_env)]
elif env_name == 'Hopper':
self.eval_envs = [gym.make('Hopper-v2') for _ in range(num_eval_env)]
else:
exit('Unrecognized environment')
if COMM.Get_rank() == 0:
self.optimizer = CEMOptimizer(
sol_dim=30,
max_iters=300,
popsize=self.train_params['pop_size'],
num_elites=self.train_params['num_elites'],
cost_function=self._cost_function,
lower_bound=0.0,
# TODO: setting the upper bound this way, means that
# if the initial dimension value is 0, then the upper bound is 0
upper_bound=cem_init_mean * 5.0,
alpha=0.75,
viz_dir=self.log_dir
)
# This is buggy
# https://github.com/lanpa/tensorboardX/issues/345
self.optimizer.writer.add_text('env_params', str(self.env_params), 0)
self.optimizer.writer.add_text('train_params', str(self.train_params), 0)
res = self.optimizer.obtain_solution(cem_init_mean, cem_init_stdev)
path = osp.join(self.log_dir, 'res.pkl')
with open(path, 'wb') as f:
pickle.dump(res, f)
COMM.Abort(0)
else:
while True:
args = COMM.recv(source=0)
r = self.train(*args)
COMM.send(r, dest=0)
def _cost_function(self, samples, cem_timestep):
print(f'cem_timestep: {cem_timestep}')
env_name = self.env_params['env_name']
backend = self.env_params['backend']
pop_size = self.train_params['pop_size']
argss = [(env_name, backend,
self.train_params, self.env_params,
samples[rank][:len(samples[rank]) // 2],
samples[rank][len(samples[rank]) // 2:]) for rank in range(len(samples))]
# Send args to other MPI processes
for rank in range(1, COMM.size):
COMM.send(argss[rank], dest=rank)
# Obtain results for all args
r = self.train(*argss[0])
reses = [(0, r)] # 0 is the rank of this process
# Receive results from the other processes:
for rank in range(1, COMM.size):
r = COMM.recv(source=rank)
reses.append((rank, r))
reses = sorted(reses, key=lambda k: k[0])
print(reses)
# Get the index of the highest performing model in population
# and write result to tensorboard
max_idx = 0
max_perf = max(reses[0][1]) # 0 is the result of process rank 0. 1 brings us the eval perf list
for i, item in enumerate(reses):
perf = max(item[1])
if perf > max_perf:
max_perf = perf
max_idx = i
# Obtain the "costs" that the CEM cost function should return
costs = [- max(i[1]) for i in reses]
print(costs)
print(min(costs))
print()
return costs
@classmethod
def _dict_to_vec(cls, env_id, d):
return np.concatenate((
d['mass'],
d['damping'],
[d['gravity']]
)).flatten().copy()
@classmethod
def _vec_to_dict(cls, env_id, means, stdevs):
if env_id == 'Walker':
return dict(
mass=means[:7],
damping=means[7:-1],
gravity=means[-1]
), dict(
mass=stdevs[:7],
damping=stdevs[7:-1],
gravity=stdevs[-1]
)
elif env_id == 'Hopper':
return dict(
mass=means[:4],
damping=means[4:-1],
gravity=means[-1]
), dict(
mass=stdevs[:4],
damping=stdevs[4:-1],
gravity=stdevs[-1]
)
else:
exit('Unrecognized environment')
|
def __init__(self, sol_dim, max_iters, popsize, num_elites, cost_function,
upper_bound=None, lower_bound=None, epsilon=0.001, alpha=0.25, viz_dir=None):
"""Creates an instance of this class.
Arguments:
sol_dim (int): The dimensionality of the problem space
max_iters (int): The maximum number of iterations to perform during optimization
popsize (int): The number of candidate solutions to be sampled at every iteration
num_elites (int): The number of top solutions that will be used to obtain the distribution
at the next iteration.
upper_bound (np.array): An array of upper bounds
lower_bound (np.array): An array of lower bounds
epsilon (float): A minimum variance. If the maximum variance drops below epsilon, optimization is
stopped.
alpha (float): Controls how much of the previous mean and variance is used for the next iteration.
next_mean = alpha * old_mean + (1 - alpha) * elite_mean, and similarly for variance.
"""
super().__init__()
self.sol_dim, self.max_iters, self.popsize, self.num_elites = sol_dim, max_iters, popsize, num_elites
self.ub, self.lb = upper_bound, lower_bound
self.epsilon, self.alpha = epsilon, alpha
self.cost_function = cost_function
if viz_dir is not None:
self.writer = tensorboardX.SummaryWriter(viz_dir)
else:
self.writer = tensorboardX.SummaryWriter()
if num_elites > popsize:
raise ValueError("Number of elites must be at most the population size.")
| 35 | 65 |
import os
import os.path as osp
import pickle
import random
from collections import deque
from datetime import datetime
import gym
import numpy as np
import scipy.stats as stats
import torch
import torch.optim as optim
from mpi4py import MPI
import dr
from dr.ppo.models import Policy, ValueNet
from dr.ppo.train import one_train_iter
from dr.ppo.utils import set_torch_num_threads, RunningMeanStd, traj_seg_gen
COMM = MPI.COMM_WORLD
import tensorboardX
def set_global_seeds(i):
torch.manual_seed(i)
np.random.seed(i)
random.seed(i)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(i)
class CEMOptimizer(object):
def __init__(self, sol_dim, max_iters, popsize, num_elites, cost_function,
upper_bound=None, lower_bound=None, epsilon=0.001, alpha=0.25, viz_dir=None):
"""Creates an instance of this class.
Arguments:
sol_dim (int): The dimensionality of the problem space
max_iters (int): The maximum number of iterations to perform during optimization
popsize (int): The number of candidate solutions to be sampled at every iteration
num_elites (int): The number of top solutions that will be used to obtain the distribution
at the next iteration.
upper_bound (np.array): An array of upper bounds
lower_bound (np.array): An array of lower bounds
epsilon (float): A minimum variance. If the maximum variance drops below epsilon, optimization is
stopped.
alpha (float): Controls how much of the previous mean and variance is used for the next iteration.
next_mean = alpha * old_mean + (1 - alpha) * elite_mean, and similarly for variance.
"""
super().__init__()
self.sol_dim, self.max_iters, self.popsize, self.num_elites = sol_dim, max_iters, popsize, num_elites
self.ub, self.lb = upper_bound, lower_bound
self.epsilon, self.alpha = epsilon, alpha
self.cost_function = cost_function
if viz_dir is not None:
self.writer = tensorboardX.SummaryWriter(viz_dir)
else:
self.writer = tensorboardX.SummaryWriter()
if num_elites > popsize:
raise ValueError("Number of elites must be at most the population size.")
def reset(self):
pass
def obtain_solution(self, init_mean, init_var):
"""Optimizes the cost function using the provided initial candidate distribution
Arguments:
init_mean (np.ndarray): The mean of the initial candidate distribution.
init_var (np.ndarray): The variance of the initial candidate distribution.
"""
mean, var, t = init_mean, init_var, 0
X = stats.truncnorm(-2, 2, loc=np.zeros_like(mean), scale=np.ones_like(var))
costs_hist = []
mean_hist = []
var_hist = []
while (t < self.max_iters) and np.max(var) > self.epsilon:
lb_dist, ub_dist = mean - self.lb, self.ub - mean
constrained_var = np.minimum(np.minimum(np.square(lb_dist / 2), np.square(ub_dist / 2)), var)
samples = X.rvs(size=[self.popsize, self.sol_dim]) * np.sqrt(constrained_var) + mean
samples = samples.astype(np.float32)
costs = self.cost_function(samples, t)
elites = samples[np.argsort(costs)][:self.num_elites]
new_mean = np.mean(elites, axis=0)
new_var = np.var(elites, axis=0)
mean = self.alpha * mean + (1 - self.alpha) * new_mean
var = self.alpha * var + (1 - self.alpha) * new_var
for i, m in enumerate(mean):
self.writer.add_scalar(f'mean/{i}', m, t)
for i, m in enumerate(var):
self.writer.add_scalar(f'var/{i}', m, t)
self.writer.add_scalar('costs', np.min(costs), t)
t += 1
costs_hist.append(costs)
mean_hist.append(mean)
var_hist.append(var)
self.writer.close()
return dict(
mean_hist=mean_hist, costs_hist=costs_hist, var_hist=var_hist
)
class PPO_Pytorch(object):
def __init__(self, experiment_name, env_params, train_params, **kwargs):
self.experiment_name = experiment_name
self.env_params = env_params
self.train_params = train_params
self.log_dir = osp.join('runs',
f'seed_{str(train_params["seed"])}_{datetime.now().strftime("%b%d_%H-%M-%S")}')
os.makedirs(self.log_dir, exist_ok=True)
with open(osp.join(self.log_dir, 'env_params.pkl'), 'wb+') as f:
pickle.dump(env_params, f)
with open(osp.join(self.log_dir, 'train_params.pkl'), 'wb+') as f:
pickle.dump(train_params, f)
super().__init__()
def train(self, env_id, backend,
train_params, env_params,
means, stdevs):
# Unpack params
hid_size = train_params['hid_size']
pol_init_std = train_params['pol_init_std']
adam_epsilon = train_params['adam_epsilon']
optim_stepsize = train_params['optim_stepsize']
# Convert means and stdevs to dict format
assert len(means) == len(stdevs), (len(means), len(stdevs))
mean_dict, stdev_dict = PPO_Pytorch._vec_to_dict(env_id, means, stdevs)
# Set parameter of env
self.env_dist.default_parameters = mean_dict
self.env_dist.stdev_dict = stdev_dict
env = self.env_dist.root_env
set_torch_num_threads()
# Construct policy and value network
pol = Policy(env.observation_space, env.action_space, hid_size, pol_init_std)
pol_optim = optim.Adam(pol.parameters(), lr=optim_stepsize, eps=adam_epsilon)
val = ValueNet(env.observation_space, hid_size)
val_optim = optim.Adam(val.parameters(), lr=optim_stepsize, eps=adam_epsilon)
optims = {'pol_optim': pol_optim, 'val_optim': val_optim}
num_train_iter = int(train_params['num_timesteps'] / train_params['ts_per_batch'])
# Buffer for running statistics
eps_rets_buff = deque(maxlen=100)
eps_rets_mean_buff = []
state_running_m_std = RunningMeanStd(shape=env.observation_space.shape)
# seg_gen is a generator that yields the training data points
seg_gen = traj_seg_gen(self.env_dist, pol, val, state_running_m_std, env_params, train_params)
eval_perfs = []
for iter_i in range(num_train_iter):
one_train_iter(pol, val, optims,
iter_i, eps_rets_buff, eps_rets_mean_buff, seg_gen,
state_running_m_std, train_params, self.eval_envs, eval_perfs)
return eval_perfs
def run(self):
set_global_seeds(self.train_params['seed'])
# Unpack params
env_name = self.env_params['env_name']
backend = self.env_params['backend']
stdev = self.train_params['env_dist_stdev']
mean_scale = self.train_params['mean_scale']
seed = self.train_params['seed']
num_eval_env = self.train_params['num_eval_env']
collision_detector = self.env_params['collision_detector']
# Obtain the initial value for the simulation parameters
env_dist = dr.dist.Normal(env_name, backend, mean_scale=mean_scale)
init_mean_param = PPO_Pytorch._dict_to_vec(env_name, env_dist.default_parameters)
init_stdev_param = np.array([stdev] * len(init_mean_param), dtype=np.float32)
cem_init_mean = np.concatenate((init_mean_param, init_stdev_param))
cem_init_stdev = np.array([1.0] * len(cem_init_mean), dtype=np.float32)
# Make envs that will be reused for training and eval
self.env_dist = dr.dist.Normal(env_name, backend)
self.env_dist.backend.set_collision_detector(env_dist.root_env, collision_detector)
self.env_dist.seed(seed)
if env_name == 'Walker':
self.eval_envs = [gym.make('Walker2d-v2') for _ in range(num_eval_env)]
elif env_name == 'Hopper':
self.eval_envs = [gym.make('Hopper-v2') for _ in range(num_eval_env)]
else:
exit('Unrecognized environment')
if COMM.Get_rank() == 0:
self.optimizer = CEMOptimizer(
sol_dim=30,
max_iters=300,
popsize=self.train_params['pop_size'],
num_elites=self.train_params['num_elites'],
cost_function=self._cost_function,
lower_bound=0.0,
# TODO: setting the upper bound this way, means that
# if the initial dimension value is 0, then the upper bound is 0
upper_bound=cem_init_mean * 5.0,
alpha=0.75,
viz_dir=self.log_dir
)
# This is buggy
# https://github.com/lanpa/tensorboardX/issues/345
self.optimizer.writer.add_text('env_params', str(self.env_params), 0)
self.optimizer.writer.add_text('train_params', str(self.train_params), 0)
res = self.optimizer.obtain_solution(cem_init_mean, cem_init_stdev)
path = osp.join(self.log_dir, 'res.pkl')
with open(path, 'wb') as f:
pickle.dump(res, f)
COMM.Abort(0)
else:
while True:
args = COMM.recv(source=0)
r = self.train(*args)
COMM.send(r, dest=0)
def _cost_function(self, samples, cem_timestep):
print(f'cem_timestep: {cem_timestep}')
env_name = self.env_params['env_name']
backend = self.env_params['backend']
pop_size = self.train_params['pop_size']
argss = [(env_name, backend,
self.train_params, self.env_params,
samples[rank][:len(samples[rank]) // 2],
samples[rank][len(samples[rank]) // 2:]) for rank in range(len(samples))]
# Send args to other MPI processes
for rank in range(1, COMM.size):
COMM.send(argss[rank], dest=rank)
# Obtain results for all args
r = self.train(*argss[0])
reses = [(0, r)] # 0 is the rank of this process
# Receive results from the other processes:
for rank in range(1, COMM.size):
r = COMM.recv(source=rank)
reses.append((rank, r))
reses = sorted(reses, key=lambda k: k[0])
print(reses)
# Get the index of the highest performing model in population
# and write result to tensorboard
max_idx = 0
max_perf = max(reses[0][1]) # 0 is the result of process rank 0. 1 brings us the eval perf list
for i, item in enumerate(reses):
perf = max(item[1])
if perf > max_perf:
max_perf = perf
max_idx = i
# Obtain the "costs" that the CEM cost function should return
costs = [- max(i[1]) for i in reses]
print(costs)
print(min(costs))
print()
return costs
@classmethod
def _dict_to_vec(cls, env_id, d):
return np.concatenate((
d['mass'],
d['damping'],
[d['gravity']]
)).flatten().copy()
@classmethod
def _vec_to_dict(cls, env_id, means, stdevs):
if env_id == 'Walker':
return dict(
mass=means[:7],
damping=means[7:-1],
gravity=means[-1]
), dict(
mass=stdevs[:7],
damping=stdevs[7:-1],
gravity=stdevs[-1]
)
elif env_id == 'Hopper':
return dict(
mass=means[:4],
damping=means[4:-1],
gravity=means[-1]
), dict(
mass=stdevs[:4],
damping=stdevs[4:-1],
gravity=stdevs[-1]
)
else:
exit('Unrecognized environment')
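# Illustrative sketch (not part of the original file): the flat CEM parameter
# vector and the dict layout used by _dict_to_vec/_vec_to_dict above for
# 'Hopper' (4 masses, then damping terms, then one gravity value). The
# concrete numbers and the damping count are assumptions for demonstration.
def _demo_hopper_round_trip():
    import numpy as np
    means = np.array([3.5, 4.0, 2.7, 5.1,   # mass (4 values)
                      1.0, 1.5, 2.0,        # damping (assumed count)
                      -9.81])               # gravity
    mean_dict = dict(mass=means[:4], damping=means[4:-1], gravity=means[-1])
    flat = np.concatenate((mean_dict['mass'], mean_dict['damping'],
                           [mean_dict['gravity']])).flatten()
    assert np.allclose(flat, means)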
|
plotfft
|
This function computes the FFT of a signal, returning the frequency
values and their magnitudes.
Parameters
----------
s: array-like
the input signal.
fmax: int
the sampling frequency.
doplot: boolean
a variable to indicate whether the plot is done or not.
Returns
-------
f: array-like
the frequency values (xx axis)
fs: array-like
the amplitude of the frequency values (yy axis)
|
import pylab as pl
import numpy as np
from os import path
from numpy import abs, linspace, sin, pi, int16
import pandas
# MASKED: plotfft function (lines 8-33)
def synthbeats2(duration, meanhr=60, stdhr=1, samplingfreq=250):
    #Minimally based on the parameters from:
#http://physionet.cps.unizar.es/physiotools/ecgsyn/Matlab/ecgsyn.m
#Inputs: duration in seconds
#Returns: signal, peaks
ibi = 60 / float(meanhr) * samplingfreq
sibi = ibi - 60 / (float(meanhr) - stdhr) * samplingfreq
peaks = np.arange(0, duration * samplingfreq, ibi)
peaks[1:] = peaks[1:] + np.random.randn(len(peaks) - 1) * sibi
if peaks[-1] >= duration * samplingfreq:
peaks = peaks[:-1]
peaks = peaks.astype('int')
signal = np.zeros(duration * samplingfreq)
signal[peaks] = 1.0
return signal, peaks
def synthbeats(duration, meanhr=60, stdhr=1, samplingfreq=250, sinfreq=None):
    #Minimally based on the parameters from:
    #http://physionet.cps.unizar.es/physiotools/ecgsyn/Matlab/ecgsyn.m
    #If sinfreq is given, it is used to generate a sinusoidal heart-rate variation instead of random noise
#Inputs: duration in seconds
#Returns: signal, peaks
t = np.arange(duration * samplingfreq) / float(samplingfreq)
signal = np.zeros(len(t))
print(len(t))
print(len(signal))
    if sinfreq is None:
        # Cast to int: randn() needs an integer count of beats.
        npeaks = int(1.2 * (duration * meanhr / 60))
        # add 20% more beats for some cumulative error
hr = pl.randn(npeaks) * stdhr + meanhr
peaks = pl.cumsum(60. / hr) * samplingfreq
peaks = peaks.astype('int')
peaks = peaks[peaks < t[-1] * samplingfreq]
else:
hr = meanhr + sin(2 * pi * t * sinfreq) * float(stdhr)
index = int(60. / hr[0] * samplingfreq)
peaks = []
while index < len(t):
peaks += [index]
index += int(60. / hr[index] * samplingfreq)
signal[peaks] = 1.0
return t, signal, peaks
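# Usage sketch (illustrative, not part of the original module): generate ten
# seconds of synthetic beat impulses at ~60 bpm and count the peaks found.
def _demo_synthbeats():
    t, sig, pk = synthbeats(10, meanhr=60, stdhr=1, samplingfreq=250)
    print(len(pk))  # roughly 10 beats expected over 10 s at 60 bpm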
def load_with_cache(file_, recache=False, sampling=1,
columns=None, temp_dir='.', data_type='int16'):
"""@brief This function loads a file from the current directory and saves
the cached file to later executions. It's also possible to make a recache
or a subsampling of the signal and choose only a few columns of the signal,
to accelerate the opening process.
@param file String: the name of the file to open.
    @param recache Boolean: whether to force the cache to be rebuilt
    (default = False).
@param sampling Integer: the sampling step. if 1, the signal isn't
sampled (default = 1).
@param columns Array-Like: the columns to read from the file. if None,
all columns are considered (default = None).
@return data Array-Like: the data from the file.
TODO: Should save cache in a different directory
TODO: Create test function and check size of generated files
TODO: receive a file handle
"""
cfile = '%s.npy' % file_
if (not path.exists(cfile)) or recache:
        if columns is None:
data = np.loadtxt(file_)[::sampling, :]
else:
data = np.loadtxt(file_)[::sampling, columns]
np.save(cfile, data.astype(data_type))
else:
data = np.load(cfile)
return data
def load_data(filename):
"""
:rtype : numpy matrix
"""
data = pandas.read_csv(filename, header=None, delimiter='\t', skiprows=9)
    return data.values  # .as_matrix() was removed in recent pandas releases
|
def plotfft(s, fmax, doplot=False):
""" This functions computes the fft of a signal, returning the frequency
and their magnitude values.
Parameters
----------
s: array-like
the input signal.
fmax: int
the sampling frequency.
doplot: boolean
a variable to indicate whether the plot is done or not.
Returns
-------
f: array-like
the frequency values (xx axis)
fs: array-like
the amplitude of the frequency values (yy axis)
"""
    fs = abs(np.fft.fft(s))
    # Use integer division so the indices stay ints and the code runs under Python 3.
    f = linspace(0, fmax / 2, len(s) // 2)
    if doplot:
        pl.plot(f[1:len(s) // 2], fs[1:len(s) // 2])
    return (f[1:len(s) // 2].copy(), fs[1:len(s) // 2].copy())
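# Usage sketch (illustrative, not part of the original code): recover the peak
# frequency of a 5 Hz test tone sampled at 100 Hz with plotfft.
def _demo_plotfft():
    import numpy as np
    fs_hz = 100
    t = np.arange(0, 2, 1.0 / fs_hz)
    freqs, mags = plotfft(np.sin(2 * np.pi * 5 * t), fs_hz, doplot=False)
    print(freqs[np.argmax(mags)])  # approximately 5 Hz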
| 8 | 33 |
import pylab as pl
import numpy as np
from os import path
from numpy import abs, linspace, sin, pi, int16
import pandas
def plotfft(s, fmax, doplot=False):
""" This functions computes the fft of a signal, returning the frequency
and their magnitude values.
Parameters
----------
s: array-like
the input signal.
fmax: int
the sampling frequency.
doplot: boolean
a variable to indicate whether the plot is done or not.
Returns
-------
f: array-like
the frequency values (xx axis)
fs: array-like
the amplitude of the frequency values (yy axis)
"""
    fs = abs(np.fft.fft(s))
    # Use integer division so the indices stay ints and the code runs under Python 3.
    f = linspace(0, fmax / 2, len(s) // 2)
    if doplot:
        pl.plot(f[1:len(s) // 2], fs[1:len(s) // 2])
    return (f[1:len(s) // 2].copy(), fs[1:len(s) // 2].copy())
def synthbeats2(duration, meanhr=60, stdhr=1, samplingfreq=250):
    #Minimally based on the parameters from:
#http://physionet.cps.unizar.es/physiotools/ecgsyn/Matlab/ecgsyn.m
#Inputs: duration in seconds
#Returns: signal, peaks
ibi = 60 / float(meanhr) * samplingfreq
sibi = ibi - 60 / (float(meanhr) - stdhr) * samplingfreq
peaks = np.arange(0, duration * samplingfreq, ibi)
peaks[1:] = peaks[1:] + np.random.randn(len(peaks) - 1) * sibi
if peaks[-1] >= duration * samplingfreq:
peaks = peaks[:-1]
peaks = peaks.astype('int')
signal = np.zeros(duration * samplingfreq)
signal[peaks] = 1.0
return signal, peaks
def synthbeats(duration, meanhr=60, stdhr=1, samplingfreq=250, sinfreq=None):
    #Minimally based on the parameters from:
    #http://physionet.cps.unizar.es/physiotools/ecgsyn/Matlab/ecgsyn.m
    #If sinfreq is given, it is used to generate a sinusoidal heart-rate variation instead of random noise
#Inputs: duration in seconds
#Returns: signal, peaks
t = np.arange(duration * samplingfreq) / float(samplingfreq)
signal = np.zeros(len(t))
print(len(t))
print(len(signal))
    if sinfreq is None:
        # Cast to int: randn() needs an integer count of beats.
        npeaks = int(1.2 * (duration * meanhr / 60))
        # add 20% more beats for some cumulative error
hr = pl.randn(npeaks) * stdhr + meanhr
peaks = pl.cumsum(60. / hr) * samplingfreq
peaks = peaks.astype('int')
peaks = peaks[peaks < t[-1] * samplingfreq]
else:
hr = meanhr + sin(2 * pi * t * sinfreq) * float(stdhr)
index = int(60. / hr[0] * samplingfreq)
peaks = []
while index < len(t):
peaks += [index]
index += int(60. / hr[index] * samplingfreq)
signal[peaks] = 1.0
return t, signal, peaks
def load_with_cache(file_, recache=False, sampling=1,
columns=None, temp_dir='.', data_type='int16'):
"""@brief This function loads a file from the current directory and saves
the cached file to later executions. It's also possible to make a recache
or a subsampling of the signal and choose only a few columns of the signal,
to accelerate the opening process.
@param file String: the name of the file to open.
    @param recache Boolean: whether to force the cache to be rebuilt
    (default = False).
@param sampling Integer: the sampling step. if 1, the signal isn't
sampled (default = 1).
@param columns Array-Like: the columns to read from the file. if None,
all columns are considered (default = None).
@return data Array-Like: the data from the file.
TODO: Should save cache in a different directory
TODO: Create test function and check size of generated files
TODO: receive a file handle
"""
cfile = '%s.npy' % file_
if (not path.exists(cfile)) or recache:
        if columns is None:
data = np.loadtxt(file_)[::sampling, :]
else:
data = np.loadtxt(file_)[::sampling, columns]
np.save(cfile, data.astype(data_type))
else:
data = np.load(cfile)
return data
def load_data(filename):
"""
:rtype : numpy matrix
"""
data = pandas.read_csv(filename, header=None, delimiter='\t', skiprows=9)
    return data.values  # .as_matrix() was removed in recent pandas releases
|
decorated_function
|
Decorator to check if the user is allowed access to the app.
If user is allowed, return the decorated function.
Otherwise, return an error page with corresponding message.
|
# -*- coding: utf-8 -*-
import json
import logging
from collections import defaultdict
from functools import wraps
from logging.config import dictConfig
from subprocess import call
import redis
import requests
from flask import Flask, Response, redirect, render_template, request, session, url_for
from flask_migrate import Migrate
from pylti.flask import lti
from redis.exceptions import ConnectionError
from rq import Queue, get_current_job
from rq.exceptions import NoSuchJobError
from rq.job import Job
from sqlalchemy.sql import text
import config
from models import Course, Extension, Quiz, User, db
from utils import (
extend_quiz,
get_course,
get_or_create,
get_quizzes,
get_user,
missing_and_stale_quizzes,
search_students,
update_job,
)
conn = redis.from_url(config.REDIS_URL)
q = Queue("quizext", connection=conn)
app = Flask(__name__)
app.config.from_object("config")
dictConfig(config.LOGGING_CONFIG)
logger = logging.getLogger("app")
db.init_app(app)
migrate = Migrate(app, db)
json_headers = {
"Authorization": "Bearer " + config.API_KEY,
"Content-type": "application/json",
}
def check_valid_user(f):
# MASKED: decorated_function function (lines 53-90)
return decorated_function
def error(exception=None):
return Response(
render_template(
"error.html",
message=exception.get(
"exception", "Please contact your System Administrator."
),
)
)
@app.context_processor
def add_google_analytics_id():
return dict(GOOGLE_ANALYTICS=config.GOOGLE_ANALYTICS)
@app.route("/", methods=["POST", "GET"])
def index():
"""
Default app index.
"""
return "Please contact your System Administrator."
@app.route("/status", methods=["GET"])
def status(): # pragma: no cover
"""
Runs smoke tests and reports status
"""
try:
job_queue_length = len(q.jobs)
except ConnectionError:
job_queue_length = -1
status = {
"tool": "Quiz Extensions",
"checks": {
"index": False,
"xml": False,
"api_key": False,
"redis": False,
"db": False,
"worker": False,
},
"url": url_for("index", _external=True),
"api_url": config.API_URL,
"debug": app.debug,
"xml_url": url_for("xml", _external=True),
"job_queue": job_queue_length,
}
# Check index
try:
response = requests.get(url_for("index", _external=True), verify=False)
status["checks"]["index"] = (
response.text == "Please contact your System Administrator."
)
except Exception:
logger.exception("Index check failed.")
# Check xml
try:
response = requests.get(url_for("xml", _external=True), verify=False)
status["checks"]["xml"] = "application/xml" in response.headers.get(
"Content-Type"
)
except Exception:
logger.exception("XML check failed.")
# Check API Key
try:
response = requests.get(
"{}users/self".format(config.API_URL),
headers={"Authorization": "Bearer " + config.API_KEY},
)
status["checks"]["api_key"] = response.status_code == 200
except Exception:
logger.exception("API Key check failed.")
# Check redis
try:
response = conn.echo("test")
status["checks"]["redis"] = response == b"test"
except ConnectionError:
logger.exception("Redis connection failed.")
# Check DB connection
try:
db.session.query(text("1")).all()
status["checks"]["db"] = True
except Exception:
logger.exception("DB connection failed.")
# Check RQ Worker
status["checks"]["worker"] = (
call('ps aux | grep "rq worker" | grep "quizext" | grep -v grep', shell=True)
== 0
)
# Overall health check - if all checks are True
status["healthy"] = all(v is True for k, v in status["checks"].items())
return Response(json.dumps(status), mimetype="application/json")
@app.route("/lti.xml", methods=["GET"])
def xml():
"""
Returns the lti.xml file for the app.
"""
from urllib.parse import urlparse
domain = urlparse(request.url_root).netloc
return Response(
render_template("lti.xml", tool_id=config.LTI_TOOL_ID, domain=domain),
mimetype="application/xml",
)
@app.route("/quiz/<course_id>/", methods=["GET"])
@check_valid_user
@lti(error=error, request="session", role="staff", app=app)
def quiz(lti=lti, course_id=None):
"""
Main landing page for the app.
Displays a page to the user that allows them to select students
to moderate quizzes for.
"""
return render_template(
"userselect.html", course_id=course_id, current_page_number=1
)
@app.route("/refresh/<course_id>/", methods=["POST"])
def refresh(course_id=None):
"""
Creates a new `refresh_background` job.
:param course_id: The Canvas ID of the Course.
:type course_id: int
:rtype: flask.Response
:returns: A JSON-formatted response containing a url for the started job.
"""
job = q.enqueue_call(func=refresh_background, args=(course_id,))
return Response(
json.dumps({"refresh_job_url": url_for("job_status", job_key=job.get_id())}),
mimetype="application/json",
status=202,
)
@app.route("/update/<course_id>/", methods=["POST"])
@check_valid_user
@lti(error=error, request="session", role="staff", app=app)
def update(lti=lti, course_id=None):
"""
Creates a new `update_background` job.
:param course_id: The Canvas ID of the Course.
    :type course_id: int
:rtype: flask.Response
:returns: A JSON-formatted response containing urls for the started jobs.
"""
refresh_job = q.enqueue_call(func=refresh_background, args=(course_id,))
update_job = q.enqueue_call(
func=update_background,
args=(course_id, request.get_json()),
depends_on=refresh_job,
)
return Response(
json.dumps(
{
"refresh_job_url": url_for("job_status", job_key=refresh_job.get_id()),
"update_job_url": url_for("job_status", job_key=update_job.get_id()),
}
),
mimetype="application/json",
status=202,
)
@app.route("/jobs/<job_key>/", methods=["GET"])
def job_status(job_key):
try:
job = Job.fetch(job_key, connection=conn)
except NoSuchJobError:
return Response(
json.dumps(
{
"error": True,
"status_msg": "{} is not a valid job key.".format(job_key),
}
),
mimetype="application/json",
status=404,
)
if job.is_finished:
return Response(json.dumps(job.result), mimetype="application/json", status=200)
elif job.is_failed:
logger.error("Job {} failed.\n{}".format(job_key, job.exc_info))
return Response(
json.dumps(
{
"error": True,
"status_msg": "Job {} failed to complete.".format(job_key),
}
),
mimetype="application/json",
status=500,
)
else:
return Response(json.dumps(job.meta), mimetype="application/json", status=202)
def update_background(course_id, extension_dict):
"""
Update time on selected students' quizzes to a specified percentage.
:param course_id: The Canvas ID of the Course to update in
:type course_id: int
:param extension_dict: A dictionary that includes the percent of
time and a list of canvas user ids.
Example:
{
'percent': '300',
'user_ids': [
'0123456',
'1234567',
'9867543',
'5555555'
]
}
:type extension_dict: dict
"""
job = get_current_job()
update_job(job, 0, "Starting...", "started")
with app.app_context():
if not extension_dict:
update_job(job, 0, "Invalid Request", "failed", error=True)
logger.warning("Invalid Request: {}".format(extension_dict))
return job.meta
try:
course_json = get_course(course_id)
except requests.exceptions.HTTPError:
update_job(job, 0, "Course not found.", "failed", error=True)
logger.exception("Unable to find course #{}".format(course_id))
return job.meta
course_name = course_json.get("name", "<UNNAMED COURSE>")
user_ids = extension_dict.get("user_ids", [])
percent = extension_dict.get("percent", None)
if not percent:
update_job(job, 0, "`percent` field required.", "failed", error=True)
logger.warning(
"Percent field not provided. Request: {}".format(extension_dict)
)
return job.meta
course, created = get_or_create(db.session, Course, canvas_id=course_id)
course.course_name = course_name
db.session.commit()
for user_id in user_ids:
try:
canvas_user = get_user(course_id, user_id)
sortable_name = canvas_user.get("sortable_name", "<MISSING NAME>")
sis_id = canvas_user.get("sis_user_id")
except requests.exceptions.HTTPError:
# Unable to find user. Log and skip them.
logger.warning(
"Unable to find user #{} in course #{}".format(user_id, course_id)
)
continue
user, created = get_or_create(db.session, User, canvas_id=user_id)
user.sortable_name = sortable_name
user.sis_id = sis_id
db.session.commit()
# create/update extension
extension, created = get_or_create(
db.session, Extension, course_id=course.id, user_id=user.id
)
extension.percent = percent
db.session.commit()
quizzes = get_quizzes(course_id)
num_quizzes = len(quizzes)
quiz_time_list = []
unchanged_quiz_time_list = []
if num_quizzes < 1:
update_job(
job,
0,
"Sorry, there are no quizzes for this course.",
"failed",
error=True,
)
logger.warning(
"No quizzes found for course {}. Unable to update.".format(course_id)
)
return job.meta
for index, quiz in enumerate(quizzes):
quiz_id = quiz.get("id", None)
quiz_title = quiz.get("title", "[UNTITLED QUIZ]")
comp_perc = int(((float(index)) / float(num_quizzes)) * 100)
updating_str = "Updating quiz #{} - {} [{} of {}]"
update_job(
job,
comp_perc,
updating_str.format(quiz_id, quiz_title, index + 1, num_quizzes),
"processing",
error=False,
)
extension_response = extend_quiz(course_id, quiz, percent, user_ids)
if extension_response.get("success", False) is True:
# add/update quiz
quiz_obj, created = get_or_create(
db.session, Quiz, canvas_id=quiz_id, course_id=course.id
)
quiz_obj.title = quiz_title
quiz_obj.time_limit = quiz.get("time_limit")
db.session.commit()
added_time = extension_response.get("added_time", None)
if added_time is not None:
quiz_time_list.append(
{"title": quiz_title, "added_time": added_time}
)
else:
unchanged_quiz_time_list.append({"title": quiz_title})
else:
update_job(
job,
comp_perc,
extension_response.get("message", "An unknown error occured."),
"failed",
error=True,
)
logger.error("Extension failed: {}".format(extension_response))
return job.meta
msg_str = (
"Success! {} {} been updated for {} student(s) to have {}% time. "
"{} {} no time limit and were left unchanged."
)
message = msg_str.format(
len(quiz_time_list),
"quizzes have" if len(quiz_time_list) != 1 else "quiz has",
len(user_ids),
percent,
len(unchanged_quiz_time_list),
"quizzes have" if len(unchanged_quiz_time_list) != 1 else "quiz has",
)
update_job(job, 100, message, "complete", error=False)
job.meta["quiz_list"] = quiz_time_list
job.meta["unchanged_list"] = unchanged_quiz_time_list
job.save()
return job.meta
def refresh_background(course_id):
"""
Look up existing extensions and apply them to new quizzes.
:param course_id: The Canvas ID of the Course.
:type course_id: int
:rtype: dict
:returns: A dictionary containing two parts:
- success `bool` False if there was an error, True otherwise.
- message `str` A long description of success or failure.
"""
job = get_current_job()
update_job(job, 0, "Starting...", "started")
with app.app_context():
course, created = get_or_create(db.session, Course, canvas_id=course_id)
try:
course_name = get_course(course_id).get("name", "<UNNAMED COURSE>")
course.course_name = course_name
db.session.commit()
except requests.exceptions.HTTPError:
update_job(job, 0, "Course not found.", "failed", error=True)
logger.exception("Unable to find course #{}".format(course_id))
return job.meta
quizzes = missing_and_stale_quizzes(course_id)
num_quizzes = len(quizzes)
if num_quizzes < 1:
update_job(
job,
100,
"Complete. No quizzes required updates.",
"complete",
error=False,
)
return job.meta
percent_user_map = defaultdict(list)
inactive_list = []
update_job(job, 0, "Getting past extensions.", "processing", False)
for extension in course.extensions:
# If extension is inactive, ignore.
if not extension.active:
inactive_list.append(extension.user.sortable_name)
logger.debug("Extension #{} is inactive.".format(extension.id))
continue
user_canvas_id = (
User.query.filter_by(id=extension.user_id).first().canvas_id
)
# Check if user is in course. If not, deactivate extension.
try:
canvas_user = get_user(course_id, user_canvas_id)
# Skip user if not a student. Fixes an edge case where a
                # student that previously received an extension changes roles.
enrolls = canvas_user.get("enrollments", [])
type_list = [
e["type"] for e in enrolls if e["enrollment_state"] == "active"
]
if not any(t == "StudentEnrollment" for t in type_list):
logger.info(
(
"User #{} was found in course #{}, but is not an "
"active student. Deactivating extension #{}. Roles "
"found: {}"
).format(
user_canvas_id,
course_id,
extension.id,
", ".join(type_list) if len(enrolls) > 0 else None,
)
)
extension.active = False
db.session.commit()
inactive_list.append(extension.user.sortable_name)
continue
except requests.exceptions.HTTPError:
log_str = "User #{} not in course #{}. Deactivating extension #{}."
logger.info(log_str.format(user_canvas_id, course_id, extension.id))
extension.active = False
db.session.commit()
inactive_list.append(extension.user.sortable_name)
continue
percent_user_map[extension.percent].append(user_canvas_id)
if len(percent_user_map) < 1:
msg_str = "No active extensions were found.<br>"
if len(inactive_list) > 0:
msg_str += " Extensions for the following students are inactive:<br>{}"
msg_str = msg_str.format("<br>".join(inactive_list))
update_job(job, 100, msg_str, "complete", error=False)
return job.meta
for index, quiz in enumerate(quizzes):
quiz_id = quiz.get("id", None)
quiz_title = quiz.get("title", "[UNTITLED QUIZ]")
comp_perc = int(((float(index)) / float(num_quizzes)) * 100)
refreshing_str = "Refreshing quiz #{} - {} [{} of {}]"
update_job(
job,
comp_perc,
refreshing_str.format(quiz_id, quiz_title, index + 1, num_quizzes),
"processing",
error=False,
)
for percent, user_list in percent_user_map.items():
extension_response = extend_quiz(course_id, quiz, percent, user_list)
if extension_response.get("success", False) is True:
# add/update quiz
quiz_obj, created = get_or_create(
db.session, Quiz, canvas_id=quiz_id, course_id=course.id
)
quiz_obj.title = quiz_title
quiz_obj.time_limit = quiz.get("time_limit")
db.session.commit()
else:
error_message = "Some quizzes couldn't be updated. "
error_message += extension_response.get("message", "")
update_job(job, comp_perc, error_message, "failed", error=True)
return job.meta
msg = "{} quizzes have been updated.".format(len(quizzes))
update_job(job, 100, msg, "complete", error=False)
return job.meta
@app.route("/missing_and_stale_quizzes/<course_id>/", methods=["GET"])
def missing_and_stale_quizzes_check(course_id):
"""
Check if there are missing quizzes.
:param course_id: The Canvas ID of the Course.
:type course_id: int
:rtype: str
:returns: A JSON-formatted string representation of a boolean.
"true" if there are missing quizzes, "false" if there are not.
"""
course = Course.query.filter_by(canvas_id=course_id).first()
if course is None:
# No record of this course. No need to update yet.
return "false"
num_extensions = Extension.query.filter_by(course_id=course.id).count()
if num_extensions == 0:
# There are no extensions for this course yet. No need to update.
return "false"
missing = len(missing_and_stale_quizzes(course_id, True)) > 0
return json.dumps(missing)
@app.route("/filter/<course_id>/", methods=["GET"])
@check_valid_user
@lti(error=error, request="session", role="staff", app=app)
def filter(lti=lti, course_id=None):
"""
Display a filtered and paginated list of students in the course.
:param course_id:
:type: int
:rtype: str
:returns: A list of students in the course using the template
user_list.html.
"""
query = request.args.get("query", "").lower()
page = int(request.args.get("page", 1))
per_page = int(request.args.get("per_page", config.DEFAULT_PER_PAGE))
user_list, max_pages = search_students(
course_id, per_page=per_page, page=page, search_term=query
)
if not user_list or max_pages < 1:
user_list = []
max_pages = 1
return render_template(
"user_list.html", users=user_list, current_page_number=page, max_pages=max_pages
)
@app.route("/launch", methods=["POST"])
@lti(error=error, request="initial", role="staff", app=app)
def lti_tool(lti=lti):
"""
Bootstrapper for lti.
"""
course_id = request.values.get("custom_canvas_course_id")
canvas_user_id = request.values.get("custom_canvas_user_id")
canvas_domain = request.values.get("custom_canvas_api_domain")
if canvas_domain not in config.ALLOWED_CANVAS_DOMAINS:
msg = (
"<p>This tool is only available from the following domain(s):<br/>{}</p>"
"<p>You attempted to access from this domain:<br/>{}</p>"
)
return render_template(
"error.html",
message=msg.format(", ".join(config.ALLOWED_CANVAS_DOMAINS), canvas_domain),
)
roles = request.values.get("roles", [])
session["is_admin"] = "Administrator" in roles
session["canvas_user_id"] = canvas_user_id
session["lti_logged_in"] = True
return redirect(url_for("quiz", course_id=course_id))
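# Illustrative sketch (not part of the original app): the session keys that
# check_valid_user expects before it lets a request through. The test-client
# usage is an assumption for demonstration, not project test code.
def _demo_session_for_check_valid_user():  # pragma: no cover
    with app.test_client() as client:
        with client.session_transaction() as sess:
            sess["canvas_user_id"] = 456
            sess["lti_logged_in"] = True
            sess["is_admin"] = True  # True skips the Canvas enrollment lookup
        return client.get("/quiz/123/")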
|
@wraps(f)
def decorated_function(*args, **kwargs):
"""
Decorator to check if the user is allowed access to the app.
If user is allowed, return the decorated function.
Otherwise, return an error page with corresponding message.
"""
canvas_user_id = session.get("canvas_user_id")
lti_logged_in = session.get("lti_logged_in", False)
if not lti_logged_in or not canvas_user_id:
return render_template("error.html", message="Not allowed!")
if "course_id" not in kwargs.keys():
return render_template("error.html", message="No course_id provided.")
course_id = int(kwargs.get("course_id"))
if not session.get("is_admin", False):
enrollments_url = "{}courses/{}/enrollments".format(
config.API_URL, course_id
)
payload = {
"user_id": canvas_user_id,
"type": ["TeacherEnrollment", "TaEnrollment", "DesignerEnrollment"],
}
user_enrollments_response = requests.get(
enrollments_url, data=json.dumps(payload), headers=json_headers
)
user_enrollments = user_enrollments_response.json()
if not user_enrollments or "errors" in user_enrollments:
message = (
"You are not enrolled in this course as a Teacher, "
"TA, or Designer."
)
return render_template("error.html", message=message)
return f(*args, **kwargs)
| 53 | 90 |
# -*- coding: utf-8 -*-
import json
import logging
from collections import defaultdict
from functools import wraps
from logging.config import dictConfig
from subprocess import call
import redis
import requests
from flask import Flask, Response, redirect, render_template, request, session, url_for
from flask_migrate import Migrate
from pylti.flask import lti
from redis.exceptions import ConnectionError
from rq import Queue, get_current_job
from rq.exceptions import NoSuchJobError
from rq.job import Job
from sqlalchemy.sql import text
import config
from models import Course, Extension, Quiz, User, db
from utils import (
extend_quiz,
get_course,
get_or_create,
get_quizzes,
get_user,
missing_and_stale_quizzes,
search_students,
update_job,
)
conn = redis.from_url(config.REDIS_URL)
q = Queue("quizext", connection=conn)
app = Flask(__name__)
app.config.from_object("config")
dictConfig(config.LOGGING_CONFIG)
logger = logging.getLogger("app")
db.init_app(app)
migrate = Migrate(app, db)
json_headers = {
"Authorization": "Bearer " + config.API_KEY,
"Content-type": "application/json",
}
def check_valid_user(f):
@wraps(f)
def decorated_function(*args, **kwargs):
"""
Decorator to check if the user is allowed access to the app.
If user is allowed, return the decorated function.
Otherwise, return an error page with corresponding message.
"""
canvas_user_id = session.get("canvas_user_id")
lti_logged_in = session.get("lti_logged_in", False)
if not lti_logged_in or not canvas_user_id:
return render_template("error.html", message="Not allowed!")
if "course_id" not in kwargs.keys():
return render_template("error.html", message="No course_id provided.")
course_id = int(kwargs.get("course_id"))
if not session.get("is_admin", False):
enrollments_url = "{}courses/{}/enrollments".format(
config.API_URL, course_id
)
payload = {
"user_id": canvas_user_id,
"type": ["TeacherEnrollment", "TaEnrollment", "DesignerEnrollment"],
}
user_enrollments_response = requests.get(
enrollments_url, data=json.dumps(payload), headers=json_headers
)
user_enrollments = user_enrollments_response.json()
if not user_enrollments or "errors" in user_enrollments:
message = (
"You are not enrolled in this course as a Teacher, "
"TA, or Designer."
)
return render_template("error.html", message=message)
return f(*args, **kwargs)
return decorated_function
def error(exception=None):
return Response(
render_template(
"error.html",
message=exception.get(
"exception", "Please contact your System Administrator."
),
)
)
@app.context_processor
def add_google_analytics_id():
return dict(GOOGLE_ANALYTICS=config.GOOGLE_ANALYTICS)
@app.route("/", methods=["POST", "GET"])
def index():
"""
Default app index.
"""
return "Please contact your System Administrator."
@app.route("/status", methods=["GET"])
def status(): # pragma: no cover
"""
Runs smoke tests and reports status
"""
try:
job_queue_length = len(q.jobs)
except ConnectionError:
job_queue_length = -1
status = {
"tool": "Quiz Extensions",
"checks": {
"index": False,
"xml": False,
"api_key": False,
"redis": False,
"db": False,
"worker": False,
},
"url": url_for("index", _external=True),
"api_url": config.API_URL,
"debug": app.debug,
"xml_url": url_for("xml", _external=True),
"job_queue": job_queue_length,
}
# Check index
try:
response = requests.get(url_for("index", _external=True), verify=False)
status["checks"]["index"] = (
response.text == "Please contact your System Administrator."
)
except Exception:
logger.exception("Index check failed.")
# Check xml
try:
response = requests.get(url_for("xml", _external=True), verify=False)
status["checks"]["xml"] = "application/xml" in response.headers.get(
"Content-Type"
)
except Exception:
logger.exception("XML check failed.")
# Check API Key
try:
response = requests.get(
"{}users/self".format(config.API_URL),
headers={"Authorization": "Bearer " + config.API_KEY},
)
status["checks"]["api_key"] = response.status_code == 200
except Exception:
logger.exception("API Key check failed.")
# Check redis
try:
response = conn.echo("test")
status["checks"]["redis"] = response == b"test"
except ConnectionError:
logger.exception("Redis connection failed.")
# Check DB connection
try:
db.session.query(text("1")).all()
status["checks"]["db"] = True
except Exception:
logger.exception("DB connection failed.")
# Check RQ Worker
status["checks"]["worker"] = (
call('ps aux | grep "rq worker" | grep "quizext" | grep -v grep', shell=True)
== 0
)
# Overall health check - if all checks are True
status["healthy"] = all(v is True for k, v in status["checks"].items())
return Response(json.dumps(status), mimetype="application/json")
@app.route("/lti.xml", methods=["GET"])
def xml():
"""
Returns the lti.xml file for the app.
"""
from urllib.parse import urlparse
domain = urlparse(request.url_root).netloc
return Response(
render_template("lti.xml", tool_id=config.LTI_TOOL_ID, domain=domain),
mimetype="application/xml",
)
@app.route("/quiz/<course_id>/", methods=["GET"])
@check_valid_user
@lti(error=error, request="session", role="staff", app=app)
def quiz(lti=lti, course_id=None):
"""
Main landing page for the app.
Displays a page to the user that allows them to select students
to moderate quizzes for.
"""
return render_template(
"userselect.html", course_id=course_id, current_page_number=1
)
@app.route("/refresh/<course_id>/", methods=["POST"])
def refresh(course_id=None):
"""
Creates a new `refresh_background` job.
:param course_id: The Canvas ID of the Course.
:type course_id: int
:rtype: flask.Response
:returns: A JSON-formatted response containing a url for the started job.
"""
job = q.enqueue_call(func=refresh_background, args=(course_id,))
return Response(
json.dumps({"refresh_job_url": url_for("job_status", job_key=job.get_id())}),
mimetype="application/json",
status=202,
)
@app.route("/update/<course_id>/", methods=["POST"])
@check_valid_user
@lti(error=error, request="session", role="staff", app=app)
def update(lti=lti, course_id=None):
"""
Creates a new `update_background` job.
:param course_id: The Canvas ID of the Course.
    :type course_id: int
:rtype: flask.Response
:returns: A JSON-formatted response containing urls for the started jobs.
"""
refresh_job = q.enqueue_call(func=refresh_background, args=(course_id,))
update_job = q.enqueue_call(
func=update_background,
args=(course_id, request.get_json()),
depends_on=refresh_job,
)
return Response(
json.dumps(
{
"refresh_job_url": url_for("job_status", job_key=refresh_job.get_id()),
"update_job_url": url_for("job_status", job_key=update_job.get_id()),
}
),
mimetype="application/json",
status=202,
)
@app.route("/jobs/<job_key>/", methods=["GET"])
def job_status(job_key):
try:
job = Job.fetch(job_key, connection=conn)
except NoSuchJobError:
return Response(
json.dumps(
{
"error": True,
"status_msg": "{} is not a valid job key.".format(job_key),
}
),
mimetype="application/json",
status=404,
)
if job.is_finished:
return Response(json.dumps(job.result), mimetype="application/json", status=200)
elif job.is_failed:
logger.error("Job {} failed.\n{}".format(job_key, job.exc_info))
return Response(
json.dumps(
{
"error": True,
"status_msg": "Job {} failed to complete.".format(job_key),
}
),
mimetype="application/json",
status=500,
)
else:
return Response(json.dumps(job.meta), mimetype="application/json", status=202)
def update_background(course_id, extension_dict):
"""
Update time on selected students' quizzes to a specified percentage.
:param course_id: The Canvas ID of the Course to update in
:type course_id: int
:param extension_dict: A dictionary that includes the percent of
time and a list of canvas user ids.
Example:
{
'percent': '300',
'user_ids': [
'0123456',
'1234567',
'9867543',
'5555555'
]
}
:type extension_dict: dict
"""
job = get_current_job()
update_job(job, 0, "Starting...", "started")
with app.app_context():
if not extension_dict:
update_job(job, 0, "Invalid Request", "failed", error=True)
logger.warning("Invalid Request: {}".format(extension_dict))
return job.meta
try:
course_json = get_course(course_id)
except requests.exceptions.HTTPError:
update_job(job, 0, "Course not found.", "failed", error=True)
logger.exception("Unable to find course #{}".format(course_id))
return job.meta
course_name = course_json.get("name", "<UNNAMED COURSE>")
user_ids = extension_dict.get("user_ids", [])
percent = extension_dict.get("percent", None)
if not percent:
update_job(job, 0, "`percent` field required.", "failed", error=True)
logger.warning(
"Percent field not provided. Request: {}".format(extension_dict)
)
return job.meta
course, created = get_or_create(db.session, Course, canvas_id=course_id)
course.course_name = course_name
db.session.commit()
for user_id in user_ids:
try:
canvas_user = get_user(course_id, user_id)
sortable_name = canvas_user.get("sortable_name", "<MISSING NAME>")
sis_id = canvas_user.get("sis_user_id")
except requests.exceptions.HTTPError:
# Unable to find user. Log and skip them.
logger.warning(
"Unable to find user #{} in course #{}".format(user_id, course_id)
)
continue
user, created = get_or_create(db.session, User, canvas_id=user_id)
user.sortable_name = sortable_name
user.sis_id = sis_id
db.session.commit()
# create/update extension
extension, created = get_or_create(
db.session, Extension, course_id=course.id, user_id=user.id
)
extension.percent = percent
db.session.commit()
quizzes = get_quizzes(course_id)
num_quizzes = len(quizzes)
quiz_time_list = []
unchanged_quiz_time_list = []
if num_quizzes < 1:
update_job(
job,
0,
"Sorry, there are no quizzes for this course.",
"failed",
error=True,
)
logger.warning(
"No quizzes found for course {}. Unable to update.".format(course_id)
)
return job.meta
for index, quiz in enumerate(quizzes):
quiz_id = quiz.get("id", None)
quiz_title = quiz.get("title", "[UNTITLED QUIZ]")
comp_perc = int(((float(index)) / float(num_quizzes)) * 100)
updating_str = "Updating quiz #{} - {} [{} of {}]"
update_job(
job,
comp_perc,
updating_str.format(quiz_id, quiz_title, index + 1, num_quizzes),
"processing",
error=False,
)
extension_response = extend_quiz(course_id, quiz, percent, user_ids)
if extension_response.get("success", False) is True:
# add/update quiz
quiz_obj, created = get_or_create(
db.session, Quiz, canvas_id=quiz_id, course_id=course.id
)
quiz_obj.title = quiz_title
quiz_obj.time_limit = quiz.get("time_limit")
db.session.commit()
added_time = extension_response.get("added_time", None)
if added_time is not None:
quiz_time_list.append(
{"title": quiz_title, "added_time": added_time}
)
else:
unchanged_quiz_time_list.append({"title": quiz_title})
else:
update_job(
job,
comp_perc,
extension_response.get("message", "An unknown error occured."),
"failed",
error=True,
)
logger.error("Extension failed: {}".format(extension_response))
return job.meta
msg_str = (
"Success! {} {} been updated for {} student(s) to have {}% time. "
"{} {} no time limit and were left unchanged."
)
message = msg_str.format(
len(quiz_time_list),
"quizzes have" if len(quiz_time_list) != 1 else "quiz has",
len(user_ids),
percent,
len(unchanged_quiz_time_list),
"quizzes have" if len(unchanged_quiz_time_list) != 1 else "quiz has",
)
update_job(job, 100, message, "complete", error=False)
job.meta["quiz_list"] = quiz_time_list
job.meta["unchanged_list"] = unchanged_quiz_time_list
job.save()
return job.meta
def refresh_background(course_id):
"""
Look up existing extensions and apply them to new quizzes.
:param course_id: The Canvas ID of the Course.
:type course_id: int
:rtype: dict
:returns: A dictionary containing two parts:
- success `bool` False if there was an error, True otherwise.
- message `str` A long description of success or failure.
"""
job = get_current_job()
update_job(job, 0, "Starting...", "started")
with app.app_context():
course, created = get_or_create(db.session, Course, canvas_id=course_id)
try:
course_name = get_course(course_id).get("name", "<UNNAMED COURSE>")
course.course_name = course_name
db.session.commit()
except requests.exceptions.HTTPError:
update_job(job, 0, "Course not found.", "failed", error=True)
logger.exception("Unable to find course #{}".format(course_id))
return job.meta
quizzes = missing_and_stale_quizzes(course_id)
num_quizzes = len(quizzes)
if num_quizzes < 1:
update_job(
job,
100,
"Complete. No quizzes required updates.",
"complete",
error=False,
)
return job.meta
percent_user_map = defaultdict(list)
inactive_list = []
update_job(job, 0, "Getting past extensions.", "processing", False)
for extension in course.extensions:
# If extension is inactive, ignore.
if not extension.active:
inactive_list.append(extension.user.sortable_name)
logger.debug("Extension #{} is inactive.".format(extension.id))
continue
user_canvas_id = (
User.query.filter_by(id=extension.user_id).first().canvas_id
)
# Check if user is in course. If not, deactivate extension.
try:
canvas_user = get_user(course_id, user_canvas_id)
# Skip user if not a student. Fixes an edge case where a
                # student that previously received an extension changes roles.
enrolls = canvas_user.get("enrollments", [])
type_list = [
e["type"] for e in enrolls if e["enrollment_state"] == "active"
]
if not any(t == "StudentEnrollment" for t in type_list):
logger.info(
(
"User #{} was found in course #{}, but is not an "
"active student. Deactivating extension #{}. Roles "
"found: {}"
).format(
user_canvas_id,
course_id,
extension.id,
", ".join(type_list) if len(enrolls) > 0 else None,
)
)
extension.active = False
db.session.commit()
inactive_list.append(extension.user.sortable_name)
continue
except requests.exceptions.HTTPError:
log_str = "User #{} not in course #{}. Deactivating extension #{}."
logger.info(log_str.format(user_canvas_id, course_id, extension.id))
extension.active = False
db.session.commit()
inactive_list.append(extension.user.sortable_name)
continue
percent_user_map[extension.percent].append(user_canvas_id)
if len(percent_user_map) < 1:
msg_str = "No active extensions were found.<br>"
if len(inactive_list) > 0:
msg_str += " Extensions for the following students are inactive:<br>{}"
msg_str = msg_str.format("<br>".join(inactive_list))
update_job(job, 100, msg_str, "complete", error=False)
return job.meta
for index, quiz in enumerate(quizzes):
quiz_id = quiz.get("id", None)
quiz_title = quiz.get("title", "[UNTITLED QUIZ]")
comp_perc = int(((float(index)) / float(num_quizzes)) * 100)
refreshing_str = "Refreshing quiz #{} - {} [{} of {}]"
update_job(
job,
comp_perc,
refreshing_str.format(quiz_id, quiz_title, index + 1, num_quizzes),
"processing",
error=False,
)
for percent, user_list in percent_user_map.items():
extension_response = extend_quiz(course_id, quiz, percent, user_list)
if extension_response.get("success", False) is True:
# add/update quiz
quiz_obj, created = get_or_create(
db.session, Quiz, canvas_id=quiz_id, course_id=course.id
)
quiz_obj.title = quiz_title
quiz_obj.time_limit = quiz.get("time_limit")
db.session.commit()
else:
error_message = "Some quizzes couldn't be updated. "
error_message += extension_response.get("message", "")
update_job(job, comp_perc, error_message, "failed", error=True)
return job.meta
msg = "{} quizzes have been updated.".format(len(quizzes))
update_job(job, 100, msg, "complete", error=False)
return job.meta
@app.route("/missing_and_stale_quizzes/<course_id>/", methods=["GET"])
def missing_and_stale_quizzes_check(course_id):
"""
Check if there are missing quizzes.
:param course_id: The Canvas ID of the Course.
:type course_id: int
:rtype: str
:returns: A JSON-formatted string representation of a boolean.
"true" if there are missing quizzes, "false" if there are not.
"""
course = Course.query.filter_by(canvas_id=course_id).first()
if course is None:
# No record of this course. No need to update yet.
return "false"
num_extensions = Extension.query.filter_by(course_id=course.id).count()
if num_extensions == 0:
# There are no extensions for this course yet. No need to update.
return "false"
missing = len(missing_and_stale_quizzes(course_id, True)) > 0
return json.dumps(missing)
@app.route("/filter/<course_id>/", methods=["GET"])
@check_valid_user
@lti(error=error, request="session", role="staff", app=app)
def filter(lti=lti, course_id=None):
"""
Display a filtered and paginated list of students in the course.
:param course_id:
:type: int
:rtype: str
:returns: A list of students in the course using the template
user_list.html.
"""
query = request.args.get("query", "").lower()
page = int(request.args.get("page", 1))
per_page = int(request.args.get("per_page", config.DEFAULT_PER_PAGE))
user_list, max_pages = search_students(
course_id, per_page=per_page, page=page, search_term=query
)
if not user_list or max_pages < 1:
user_list = []
max_pages = 1
return render_template(
"user_list.html", users=user_list, current_page_number=page, max_pages=max_pages
)
@app.route("/launch", methods=["POST"])
@lti(error=error, request="initial", role="staff", app=app)
def lti_tool(lti=lti):
"""
Bootstrapper for lti.
"""
course_id = request.values.get("custom_canvas_course_id")
canvas_user_id = request.values.get("custom_canvas_user_id")
canvas_domain = request.values.get("custom_canvas_api_domain")
if canvas_domain not in config.ALLOWED_CANVAS_DOMAINS:
msg = (
"<p>This tool is only available from the following domain(s):<br/>{}</p>"
"<p>You attempted to access from this domain:<br/>{}</p>"
)
return render_template(
"error.html",
message=msg.format(", ".join(config.ALLOWED_CANVAS_DOMAINS), canvas_domain),
)
roles = request.values.get("roles", [])
session["is_admin"] = "Administrator" in roles
session["canvas_user_id"] = canvas_user_id
session["lti_logged_in"] = True
return redirect(url_for("quiz", course_id=course_id))
|
get_moon_j2000
|
Code to determine the apparent J2000 position for a given
time and at a given position for the observatory.
epoch needs to be a datetime or Time object.
position is a list/tuple of X/Y/Z positions
|
from datetime import datetime
from astropy.time import Time
def read_tle_file(tlefile, **kwargs):
"""
Read in a TLE file and return the TLE that is closest to the date you want to
propagate the orbit to.
"""
times = []
line1 = []
line2 = []
from os import path
from datetime import datetime
# Catch if the file can't be opened:
try:
f = open(tlefile, 'r')
    except FileNotFoundError:
        print("Unable to open: " + tlefile)
        # Re-raise so callers don't hit a confusing NameError on 'f' below.
        raise
ln=0
for line in f:
# print(line)
if (ln == 0):
year= int(line[18:20])
day = int(line[20:23])
times.extend([datetime.strptime("{}:{}".format(year, day), "%y:%j")])
line1.extend([line.strip()])
ln=1
else:
ln=0
line2.extend([line.strip()])
f.close()
return times, line1, line2
def get_epoch_tle(epoch, tlefile):
"""
Find the TLE that is closest to the epoch you want to search.
epoch is a datetime object, tlefile is the file you want to search through.
"""
times, line1, line2 = read_tle_file(tlefile)
from datetime import datetime
from astropy.time import Time
# Allow astropy Time objects
if type(epoch) is Time:
epoch = epoch.datetime
mindt = 100.
min_ind = 0
for ind, t in enumerate(times):
dt = abs((epoch -t).days)
if dt < mindt:
min_ind = ind
mindt = dt
good_line1 = line1[min_ind]
good_line2 = line2[min_ind]
return mindt, good_line1, good_line2
def convert_nustar_time(t, leap=5):
'''
Converts MET seconds to a datetime object.
Default is to subtract off 5 leap seconds.
'''
import astropy.units as u
mjdref = 55197*u.d
met = (t - leap)* u.s + mjdref
met_datetime = Time(met.to(u.d), format = 'mjd').datetime
return met_datetime
def get_nustar_location(checktime, line1, line2):
'''
Code to determine the spacecraft location from the TLE.
Inputs are a datetime object and the two lines of the TLE you want to use.
Returns a tuple that has the X, Y, and Z geocentric coordinates (in km).
'''
from sgp4.earth_gravity import wgs72
from sgp4.io import twoline2rv
from astropy.coordinates import EarthLocation
satellite = twoline2rv(line1, line2, wgs72)
position, velocity = satellite.propagate(
checktime.year, checktime.month, checktime.day,
checktime.hour, checktime.minute, checktime.second)
return position
def eci2el(x,y,z,dt):
"""
Convert Earth-Centered Inertial (ECI) cartesian coordinates to ITRS for astropy EarthLocation object.
Inputs :
x = ECI X-coordinate
y = ECI Y-coordinate
z = ECI Z-coordinate
dt = UTC time (datetime object)
"""
from astropy.coordinates import GCRS, ITRS, EarthLocation, CartesianRepresentation
import astropy.units as u
# convert datetime object to astropy time object
tt=Time(dt,format='datetime')
# Read the coordinates in the Geocentric Celestial Reference System
gcrs = GCRS(CartesianRepresentation(x=x, y=y,z=z), obstime=tt)
# Convert it to an Earth-fixed frame
itrs = gcrs.transform_to(ITRS(obstime=tt))
el = EarthLocation.from_geocentric(itrs.x, itrs.y, itrs.z)
return el
# MASKED: get_moon_j2000 function (lines 138-173)
|
def get_moon_j2000(epoch, line1, line2, position = None):
'''
Code to determine the apparent J2000 position for a given
time and at a given position for the observatory.
epoch needs to be a datetime or Time object.
position is a list/tuple of X/Y/Z positions
'''
from astropy.time import Time
from astropy.coordinates import get_moon, EarthLocation
import astropy.units as u
import sys
from datetime import datetime
if type(epoch) is Time:
epoch = epoch.datetime
if position is None:
position = get_nustar_location(epoch, line1, line2) # position in ECI coords
t=Time(epoch)
loc = eci2el(*position*u.km,t)
moon_coords = get_moon(t,loc)
# Get just the coordinates in degrees
ra_moon, dec_moon = moon_coords.ra.degree * u.deg, moon_coords.dec.degree*u.deg
return ra_moon, dec_moon
| 138 | 173 |
from datetime import datetime
from astropy.time import Time
def read_tle_file(tlefile, **kwargs):
"""
Read in a TLE file and return the TLE that is closest to the date you want to
propagate the orbit to.
"""
times = []
line1 = []
line2 = []
from os import path
from datetime import datetime
# Catch if the file can't be opened:
try:
f = open(tlefile, 'r')
    except FileNotFoundError:
        print("Unable to open: " + tlefile)
        # Re-raise so callers don't hit a confusing NameError on 'f' below.
        raise
ln=0
for line in f:
# print(line)
if (ln == 0):
year= int(line[18:20])
day = int(line[20:23])
times.extend([datetime.strptime("{}:{}".format(year, day), "%y:%j")])
line1.extend([line.strip()])
ln=1
else:
ln=0
line2.extend([line.strip()])
f.close()
return times, line1, line2
def get_epoch_tle(epoch, tlefile):
"""
Find the TLE that is closest to the epoch you want to search.
epoch is a datetime object, tlefile is the file you want to search through.
"""
times, line1, line2 = read_tle_file(tlefile)
from datetime import datetime
from astropy.time import Time
# Allow astropy Time objects
if type(epoch) is Time:
epoch = epoch.datetime
mindt = 100.
min_ind = 0
for ind, t in enumerate(times):
dt = abs((epoch -t).days)
if dt < mindt:
min_ind = ind
mindt = dt
good_line1 = line1[min_ind]
good_line2 = line2[min_ind]
return mindt, good_line1, good_line2
def convert_nustar_time(t, leap=5):
'''
Converts MET seconds to a datetime object.
Default is to subtract off 5 leap seconds.
'''
import astropy.units as u
mjdref = 55197*u.d
met = (t - leap)* u.s + mjdref
met_datetime = Time(met.to(u.d), format = 'mjd').datetime
return met_datetime
def get_nustar_location(checktime, line1, line2):
'''
Code to determine the spacecraft location from the TLE.
Inputs are a datetime object and the two lines of the TLE you want to use.
Returns a tuple that has the X, Y, and Z geocentric coordinates (in km).
'''
from sgp4.earth_gravity import wgs72
from sgp4.io import twoline2rv
from astropy.coordinates import EarthLocation
satellite = twoline2rv(line1, line2, wgs72)
position, velocity = satellite.propagate(
checktime.year, checktime.month, checktime.day,
checktime.hour, checktime.minute, checktime.second)
return position
def eci2el(x,y,z,dt):
"""
Convert Earth-Centered Inertial (ECI) cartesian coordinates to ITRS for astropy EarthLocation object.
Inputs :
x = ECI X-coordinate
y = ECI Y-coordinate
z = ECI Z-coordinate
dt = UTC time (datetime object)
"""
from astropy.coordinates import GCRS, ITRS, EarthLocation, CartesianRepresentation
import astropy.units as u
# convert datetime object to astropy time object
tt=Time(dt,format='datetime')
# Read the coordinates in the Geocentric Celestial Reference System
gcrs = GCRS(CartesianRepresentation(x=x, y=y,z=z), obstime=tt)
# Convert it to an Earth-fixed frame
itrs = gcrs.transform_to(ITRS(obstime=tt))
el = EarthLocation.from_geocentric(itrs.x, itrs.y, itrs.z)
return el
def get_moon_j2000(epoch, line1, line2, position = None):
'''
Code to determine the apparent J2000 position for a given
time and at a given position for the observatory.
epoch needs to be a datetime or Time object.
position is a list/tuple of X/Y/Z positions
'''
from astropy.time import Time
from astropy.coordinates import get_moon, EarthLocation
import astropy.units as u
import sys
from datetime import datetime
if type(epoch) is Time:
epoch = epoch.datetime
if position is None:
position = get_nustar_location(epoch, line1, line2) # position in ECI coords
t=Time(epoch)
loc = eci2el(*position*u.km,t)
moon_coords = get_moon(t,loc)
# Get just the coordinates in degrees
ra_moon, dec_moon = moon_coords.ra.degree * u.deg, moon_coords.dec.degree*u.deg
return ra_moon, dec_moon
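# Usage sketch (illustrative; 'nustar.tle' is a hypothetical TLE file path):
# pick the TLE closest to an epoch, then get the apparent lunar RA/Dec.
def _demo_moon_position():  # pragma: no cover
    from datetime import datetime
    epoch = datetime(2020, 1, 1, 12, 0, 0)
    _, line1, line2 = get_epoch_tle(epoch, 'nustar.tle')
    return get_moon_j2000(epoch, line1, line2)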
|
check_or_get_class
|
Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_paths (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
    TypeError: If class does not inherit :attr:`superclass`.
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
# MASKED: check_or_get_class function (lines 146-175)
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
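# A minimal usage sketch (illustrative only; `_Foo` is a hypothetical class
# defined just for the example): `get_instance` validates the keyword
# arguments against the class constructor before instantiating.
def _example_get_instance():
    class _Foo(object):
        def __init__(self, x, y=0):
            self.x = x
            self.y = y
    foo = get_instance(_Foo, {'x': 1, 'y': 2})
    # Passing an unknown kwarg, e.g. {'z': 3}, would raise ValueError instead.
    return foo.x, foo.y  # (1, 2)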
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
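# A minimal usage sketch (illustrative only): redundant keyword arguments are
# silently dropped. `argparse.ArgumentParser` is used only because it is a
# readily available stdlib class; it is not part of this module.
def _example_get_instance_with_redundant_kwargs():
    parser = get_instance_with_redundant_kwargs(
        'argparse.ArgumentParser',
        {'description': 'demo', 'not_an_arg': 42})
    # 'not_an_arg' is ignored; only 'description' reaches the constructor.
    return parser.description  # 'demo'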
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
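# A minimal usage sketch (illustrative only): looking up a function by name
# within candidate modules. `math.sqrt` is just a stdlib example.
def _example_get_function():
    sqrt = get_function('sqrt', module_paths=['math'])
    return sqrt(9.0)  # 3.0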
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
        fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
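# A minimal usage sketch (illustrative only; `_add` is a made-up callable):
# keyword arguments the callable does not accept are dropped before the call.
def _example_call_function_with_redundant_kwargs():
    def _add(a, b=1):
        return a + b
    # 'c' is not an argument of _add, so it is ignored.
    return call_function_with_redundant_kwargs(_add, {'a': 2, 'b': 3, 'c': 4})  # 5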
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
    arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
        hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
            '`hparams` must be a dict, an instance of HParams, or `None`.')
kwargs_.update(kwargs or {})
return kwargs_
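# A minimal usage sketch (illustrative only; the hparam names are made up):
# the returned dict bundles the user kwargs with an extra 'hparams' entry.
def _example_get_instance_kwargs():
    kwargs_ = get_instance_kwargs({'units': 128}, {'dropout': 0.1})
    # kwargs_ == {'hparams': {'dropout': 0.1}, 'units': 128}
    return kwargs_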
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
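# A minimal usage sketch (illustrative only; the config names are made up):
# existing entries in the target dict are preserved while missing ones are
# filled in recursively from the source dict.
def _example_dict_patch():
    tgt = {'optimizer': {'type': 'Adam'}}
    src = {'optimizer': {'type': 'SGD', 'kwargs': {'lr': 0.001}}, 'name': 'opt'}
    patched = dict_patch(tgt, src)
    # patched == {'optimizer': {'type': 'Adam', 'kwargs': {'lr': 0.001}},
    #             'name': 'opt'}
    return patched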
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
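# A minimal usage sketch (illustrative only; the vocab is made up): vectorized
# lookup with a default value for keys missing from the dict.
def _example_dict_lookup():
    vocab = {'a': 0, 'b': 1}
    ids = dict_lookup(vocab, [['a', 'b'], ['b', 'c']], default=-1)
    # ids is a numpy array equal to [[0, 1], [1, -1]]
    return ids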
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
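# A minimal usage sketch (illustrative only; the keys are made up): only keys
# present in the source dict appear in the result.
def _example_dict_fetch():
    src = {'lr': 0.1, 'momentum': 0.9, 'name': 'sgd'}
    sub = dict_fetch(src, ['lr', 'name', 'missing'])
    # sub == {'lr': 0.1, 'name': 'sgd'}
    return sub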
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
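# A minimal usage sketch (illustrative only; the config names are made up):
# nested keys are joined with the separator into flat dotted keys.
def _example_flatten_dict():
    nested = {'rnn': {'cell': {'num_units': 256}}, 'dropout': 0.1}
    flat = flatten_dict(nested)
    # flat == {'rnn.cell.num_units': 256, 'dropout': 0.1}
    return flat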
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
    - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
    This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
    :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
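# A minimal usage sketch (illustrative only; the sentence is made up):
# BOS/EOS/PAD tokens are stripped in one call using the default token strings.
def _example_strip_special_tokens():
    text = '<BOS> a sentence <EOS> <PAD> <PAD>'
    return strip_special_tokens(text)  # 'a sentence'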
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
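# A minimal usage sketch (illustrative only; the tokens are made up): tokens
# along the last dimension are joined into sentence strings.
def _example_str_join():
    tokens = [['a', 'sentence'], ['and', 'another', 'one']]
    return str_join(tokens)  # ['a sentence', 'and another one']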
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concat along the last dimension of the
            tokens into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type with :attr:`fw_tensor`.
Returns:
A tensor of the same shape and value with :attr:`fw_tensor` but will
direct gradient to bw_tensor.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
    each token that's truncated likely contains more information than a
    token truncated from the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
        module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
            :attr:`module_path`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
| 146 | 175 |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
        module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
            :attr:`module_path`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
        fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
    arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
        hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
            '`hparams` must be a dict, an instance of HParams, or `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
    - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
    This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
    :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concat along the last dimension of the
            tokens into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type with :attr:`fw_tensor`.
Returns:
A tensor of the same shape and value with :attr:`fw_tensor` but will
direct gradient to bw_tensor.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
    each token that's truncated likely contains more information than a
    token truncated from the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
get_class
|
Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
        module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
            :attr:`module_path`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
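# Illustrative usage sketch (editor's addition): resolves a class from its
# full dotted path and verifies the superclass constraint. Relies on
# `get_class`, which is masked out in this copy of the file.
def _example_check_or_get_class():
    cls = check_or_get_class('collections.OrderedDict', superclass=dict)
    return cls  # <class 'collections.OrderedDict'>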
# MASKED: get_class function (lines 178-214)
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
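# Illustrative usage sketch (editor's addition): `get_instance` validates the
# constructor arguments before instantiating. `_DummyLayer` is a hypothetical
# local class used only for the demonstration.
def _example_get_instance():
    class _DummyLayer(object):
        def __init__(self, units, activation=None):
            self.units = units
            self.activation = activation
    layer = get_instance(_DummyLayer, {'units': 64})
    # Passing an unknown key, e.g. {'units': 64, 'bogus': 1}, would raise
    # ValueError because 'bogus' is not a constructor argument.
    return layer.units  # 64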
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
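# Illustrative usage sketch (editor's addition): extra keyword arguments that
# the callee does not accept are silently dropped.
def _example_call_function_with_redundant_kwargs():
    def _scale(x, factor=2):
        return x * factor
    return call_function_with_redundant_kwargs(
        _scale, {'x': 3, 'factor': 10, 'unused': None})  # 30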
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
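# Illustrative usage sketch (editor's addition): merges explicit keyword
# arguments with an `hparams` entry, as expected by module constructors.
def _example_get_instance_kwargs():
    return get_instance_kwargs({'units': 128}, {'dropout': 0.1})
    # {'hparams': {'dropout': 0.1}, 'units': 128}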
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
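# Illustrative usage sketch (editor's addition): existing keys in the target
# are kept; missing keys are filled in recursively from the source.
def _example_dict_patch():
    tgt = {'lr': 0.1, 'optimizer': {'type': 'Adam'}}
    src = {'lr': 1.0, 'clip': 5.0, 'optimizer': {'type': 'SGD', 'eps': 1e-8}}
    return dict_patch(tgt, src)
    # {'lr': 0.1, 'optimizer': {'type': 'Adam', 'eps': 1e-08}, 'clip': 5.0}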
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
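# Illustrative usage sketch (editor's addition): vectorized lookup over a
# nested list of keys, with a default for out-of-vocabulary entries.
def _example_dict_lookup():
    token_to_id = {'a': 0, 'b': 1}
    return dict_lookup(token_to_id, [['a', 'b'], ['b', 'c']], default=-1)
    # array([[ 0,  1],
    #        [ 1, -1]])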
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
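# Illustrative usage sketch (editor's addition): fetches only the listed keys
# that actually exist in the source dict.
def _example_dict_fetch():
    src = {'lr': 0.1, 'momentum': 0.9, 'name': 'sgd'}
    return dict_fetch(src, ['lr', 'name', 'missing'])  # {'lr': 0.1, 'name': 'sgd'}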
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
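# Illustrative usage sketch (editor's addition): nested keys are joined with
# the separator into a flat dict.
def _example_flatten_dict():
    return flatten_dict({'optimizer': {'type': 'Adam', 'kwargs': {'lr': 0.001}}})
    # {'optimizer.type': 'Adam', 'optimizer.kwargs.lr': 0.001}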
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
- Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_token` (for PAD), and
:func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
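# Illustrative usage sketch (editor's addition): BOS, EOS, and PAD tokens are
# removed in one call using the default special-token strings.
def _example_strip_special_tokens():
    return strip_special_tokens('<BOS> a sentence <EOS> <PAD> <PAD>')
    # 'a sentence'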
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
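# Illustrative usage sketch (editor's addition): joins tokens along the last
# dimension, reducing an `n`-D structure to `(n-1)`-D strings.
def _example_str_join():
    return str_join([['a', 'sentence'], ['another', 'one', 'here']])
    # ['a sentence', 'another one here']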
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concat along the last dimension of
the tokens into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type with :attr:`fw_tensor`.
Returns:
A tensor with the same shape and value as :attr:`fw_tensor`, but which
directs gradients to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
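# Illustrative usage sketch (editor's addition): the forward value equals
# `hard`, while gradients flow to `soft` (a straight-through estimator).
def _example_straight_through():
    soft = tf.constant([0.2, 0.8])
    hard = tf.constant([0.0, 1.0])
    return straight_through(hard, soft)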
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
each token that's truncated likely contains more information than a token
in a longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
| 178 | 214 |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_path`.
TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
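# Illustrative usage sketch (editor's addition): `get_class` resolves either a
# full dotted path or a bare name combined with candidate module paths.
def _example_get_class():
    cls_by_path = get_class('collections.OrderedDict')
    cls_by_name = get_class('OrderedDict', module_paths=['collections'])
    return cls_by_path is cls_by_name  # True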
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
- Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_token` (for PAD), and
:func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concat along the last dimension of
the tokens into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type with :attr:`fw_tensor`.
Returns:
A tensor with the same shape and value as :attr:`fw_tensor`, but which
directs gradients to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
each token that's truncated likely contains more information than a token
in a longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
check_or_get_instance
|
Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
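# Illustrative sketch, not part of the original Texar source: a small check of
# what `get_default_arg_values` returns for a plain function.
def _example_get_default_arg_values():
    def f(a, b=1, c='x'):
        return a, b, c
    # Only arguments with defaults are reported: {'b': 1, 'c': 'x'}.
    return get_default_arg_values(f)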
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_paths (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
# MASKED: check_or_get_instance function (lines 217-253)
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
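# Illustrative sketch, not part of the original Texar source. The class name
# 'MyEncoder' and module path 'mylib.encoders' below are hypothetical; any
# keyword the constructor does not accept makes `get_instance` raise ValueError.
def _example_get_instance():
    return get_instance(
        'MyEncoder',                       # hypothetical class name
        {'hidden_size': 256},              # constructor kwargs
        module_paths=['mylib.encoders'])   # hypothetical search path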
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
        fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
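# Illustrative sketch, not part of the original Texar source: extra keys in the
# kwargs dict are silently ignored instead of raising TypeError.
def _example_call_function_with_redundant_kwargs():
    def f(x, y=2):
        return x + y
    # 'z' is not an argument of `f`, so it is dropped; the call returns 12.
    return call_function_with_redundant_kwargs(f, {'x': 10, 'z': 99})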
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
    arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
        hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
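# Illustrative sketch, not part of the original Texar source: nested keys are
# looked up element-wise and missing keys fall back to `default`.
def _example_dict_lookup():
    vocab = {'a': 0, 'b': 1}
    # Returns a numpy array equal to [[0, 1], [1, -1]].
    return dict_lookup(vocab, [['a', 'b'], ['b', '<unk>']], default=-1)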
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
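# Illustrative sketch, not part of the original Texar source: nested dicts are
# flattened into dotted keys, which is convenient for logging hyperparameters.
def _example_flatten_dict():
    nested = {'optimizer': {'type': 'Adam', 'kwargs': {'lr': 1e-3}}}
    # Returns {'optimizer.type': 'Adam', 'optimizer.kwargs.lr': 0.001}.
    return flatten_dict(nested)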
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
        - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
    This is a joint function of :func:`strip_eos`, :func:`strip_token` (for
    the PAD token), and :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concat along the last dimension of the
            tokens into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
        bw_tensor: A tensor to which the gradient is backpropagated. Must
            have the same shape and type as :attr:`fw_tensor`.
    Returns:
        A tensor with the same shape and value as :attr:`fw_tensor`, but
        which directs the gradient to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
each token that's truncated likely contains more information than a
longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
| 217 | 253 |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_paths (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
        fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
    arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
        hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
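# Illustrative sketch, not part of the original Texar source: the returned dict
# simply bundles user kwargs with an 'hparams' entry. The key names below are
# made up for the example.
def _example_get_instance_kwargs():
    # Returns {'hparams': {'num_units': 64}, 'cell_type': 'LSTMCell'}.
    return get_instance_kwargs({'cell_type': 'LSTMCell'}, {'num_units': 64})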
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
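# Illustrative sketch, not part of the original Texar source: existing entries
# in the target dict win, and nested dicts are patched recursively.
def _example_dict_patch():
    tgt = {'dropout': 0.1, 'rnn': {'units': 128}}
    src = {'dropout': 0.5, 'rnn': {'units': 256, 'layers': 2}, 'lr': 1e-3}
    # Returns {'dropout': 0.1, 'rnn': {'units': 128, 'layers': 2}, 'lr': 0.001}.
    return dict_patch(tgt, src)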
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
        - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
    This is a joint function of :func:`strip_eos`, :func:`strip_token` (for
    the PAD token), and :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
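# Illustrative sketch, not part of the original Texar source: BOS, EOS and PAD
# are removed in one call, with everything after EOS discarded.
def _example_strip_special_tokens():
    text = '<BOS> a sentence <EOS> <PAD> <PAD>'
    # Returns 'a sentence'.
    return strip_special_tokens(text)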
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
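# Illustrative sketch, not part of the original Texar source: joining collapses
# the last dimension of a (nested) token list.
def _example_str_join():
    tokens = [['a', 'cat'], ['sat', 'down']]
    # Returns ['a cat', 'sat down'].
    return str_join(tokens)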
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concat along the last dimension of the
            tokens into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
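# Illustrative sketch, not part of the original Texar source: ceildiv is handy
# for computing the number of batches needed to cover a dataset.
def _example_ceildiv():
    # 7 examples with batch size 2 need 4 batches; plain `7 // 2` would give 3.
    return ceildiv(7, 2)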
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
        bw_tensor: A tensor to which the gradient is backpropagated. Must
            have the same shape and type as :attr:`fw_tensor`.
    Returns:
        A tensor with the same shape and value as :attr:`fw_tensor`, but
        which directs the gradient to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
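# Illustrative sketch, not part of the original Texar source: a straight-through
# estimator that uses a hard one-hot sample in the forward pass while letting
# gradients flow through the soft logits.
def _example_straight_through():
    logits = tf.constant([0.1, 0.7, 0.2])
    hard = tf.one_hot(tf.argmax(logits), depth=3)
    return straight_through(hard, logits)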
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
each token that's truncated likely contains more information than a
longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
get_instance
|
Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
# MASKED: get_instance function (lines 256-293)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
- Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
:func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape as :attr:`str_`, with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concat the tokens along the last dimension
into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type as :attr:`fw_tensor`.
Returns:
A tensor with the same shape and value as :attr:`fw_tensor`, but which
directs gradient to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
each token that's truncated likely contains more information than a
token from the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
| 256 | 293 |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
- Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
:func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape as :attr:`str_`, with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concat the tokens along the last dimension
into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type as :attr:`fw_tensor`.
Returns:
A tensor with the same shape and value as :attr:`fw_tensor`, but which
directs gradient to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
each token that's truncated likely contains more information than a
token from the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
check_or_get_instance_with_redundant_kwargs
|
Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
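Example:
A minimal usage sketch with hypothetical placeholders (`MyModel`,
`mylib.models`); not taken from the library.
.. code-block:: python
# Keyword arguments not accepted by the constructor (here "unused")
# are filtered out before instantiation; `classtype` is then checked.
model = check_or_get_instance_with_redundant_kwargs(
"MyModel", kwargs={"hidden_dim": 256, "unused": 0},
module_paths=["mylib.models"], classtype=MyModel)
# Passing an existing instance instead returns it after the type check:
model = check_or_get_instance_with_redundant_kwargs(
model, kwargs=None, classtype=MyModel)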
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
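# NOTE (editor's addition): illustrative usage sketch only -- the helper below
# is hypothetical and not part of the original texar source. Only parameters
# with defaults appear in the dict returned by `get_default_arg_values`.
def _get_default_arg_values_example():
    def f(a, b=1, c='x'):
        return a, b, c
    assert get_default_arg_values(f) == {'b': 1, 'c': 'x'}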
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
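# NOTE (editor's addition): illustrative usage sketch only -- the helper and
# the `_Dummy` class below are hypothetical and not part of the original texar
# source. `get_instance` accepts a class object directly and validates `kwargs`
# against the constructor signature before instantiating.
def _get_instance_example():
    class _Dummy(object):
        def __init__(self, size, name='dummy'):
            self.size = size
            self.name = name
    obj = get_instance(_Dummy, {'size': 3})
    assert obj.size == 3 and obj.name == 'dummy'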
# MASKED: check_or_get_instance_with_redundant_kwargs function (lines 296-334)
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The results returned by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
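# NOTE (editor's addition): illustrative usage sketch only -- the helper below
# is hypothetical and not part of the original texar source. Keyword arguments
# not accepted by `fn` are silently dropped before the call.
def _call_function_with_redundant_kwargs_example():
    def add(x, y=10):
        return x + y
    result = call_function_with_redundant_kwargs(
        add, {'x': 1, 'y': 2, 'unused': 99})
    assert result == 3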
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
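# NOTE (editor's addition): illustrative usage sketch only -- the helper below
# is hypothetical and not part of the original texar source. `hparams` given as
# a plain dict is passed through under the 'hparams' key, alongside `kwargs`.
def _get_instance_kwargs_example():
    kwargs_ = get_instance_kwargs({'inputs': 'x'}, {'num_units': 256})
    assert kwargs_ == {'hparams': {'num_units': 256}, 'inputs': 'x'}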
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
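# NOTE (editor's addition): illustrative usage sketch only -- the helper below
# is hypothetical and not part of the original texar source. Only missing keys
# are added by `dict_patch`; existing values in the target are left untouched,
# and nested dicts are patched recursively.
def _dict_patch_example():
    tgt = {'a': 1, 'nested': {'x': 0}}
    src = {'a': 100, 'b': 2, 'nested': {'x': -1, 'y': 9}}
    patched = dict_patch(tgt, src)
    assert patched == {'a': 1, 'b': 2, 'nested': {'x': 0, 'y': 9}}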
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
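# NOTE (editor's addition): illustrative usage sketch only -- the helper below
# is hypothetical and not part of the original texar source. Lookups are
# vectorized with numpy, so nested key lists map to an array of the same shape.
def _dict_lookup_example():
    id_to_word = {0: '<PAD>', 1: 'hello'}
    values = dict_lookup(id_to_word, [[1, 0], [0, 1]], default='<UNK>')
    assert values.tolist() == [['hello', '<PAD>'], ['<PAD>', 'hello']]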
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
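# NOTE (editor's addition): illustrative usage sketch only -- the helper below
# is hypothetical and not part of the original texar source. Plain dicts work
# as well as HParams; only keys present in both appear in the result.
def _dict_fetch_example():
    src = {'lr': 0.001, 'dropout': 0.1, 'name': 'opt'}
    sub = dict_fetch(src, ['lr', 'momentum'])
    assert sub == {'lr': 0.001}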
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
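# NOTE (editor's addition): illustrative usage sketch only -- the helper below
# is hypothetical and not part of the original texar source. `dict_` is
# modified in place and the popped items are returned.
def _dict_pop_example():
    d = {'a': 1, 'b': 2}
    popped = dict_pop(d, ['a', 'missing'], default=None)
    assert popped == {'a': 1, 'missing': None} and d == {'b': 2}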
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
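# NOTE (editor's addition): illustrative usage sketch only -- the helper below
# is hypothetical and not part of the original texar source, and it assumes a
# Python version where `collections.MutableMapping` (used above) is still
# available. Nested keys are joined with `sep`; top-level keys are kept as-is.
def _flatten_dict_example():
    nested = {'optimizer': {'type': 'Adam', 'kwargs': {'lr': 0.001}}, 'seed': 1}
    flat = flatten_dict(nested)
    assert flat == {'optimizer.type': 'Adam',
                    'optimizer.kwargs.lr': 0.001,
                    'seed': 1}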
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
- Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_token` (for the
PAD token), and :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
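# NOTE (editor's addition): illustrative usage sketch only -- the helper below
# is hypothetical and not part of the original texar source. BOS/EOS/PAD are
# removed in one call; EOS stripping also drops everything after the EOS token.
def _strip_special_tokens_example():
    s = '<BOS> a sentence <EOS> <PAD> <PAD>'
    assert strip_special_tokens(s) == 'a sentence'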
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concatenate the tokens along the last dimension
into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which the gradient is backpropagated. Must have the
same shape and type as :attr:`fw_tensor`.
Returns:
A tensor with the same shape and value as :attr:`fw_tensor`, but which
directs the gradient to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
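# NOTE (editor's addition): hypothetical sketch only, not part of the original
# texar source. A common use of the straight-through trick is to forward a
# hard, non-differentiable value (here a one-hot argmax) while routing
# gradients through a soft surrogate (the softmax of the same logits).
def _straight_through_example(logits):
    depth = tf.shape(logits)[-1]
    hard = tf.one_hot(tf.argmax(logits, axis=-1), depth)  # used in forward pass
    soft = tf.nn.softmax(logits)                          # receives the gradient
    return straight_through(hard, soft)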
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which always truncates the longer sequence
one token at a time. This makes more sense than truncating an equal
percentage of tokens from each, since if one sequence is very short then
each token that is truncated likely contains more information than a
token from the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
| 296 | 334 |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The results returned by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
- Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_token` (for the
PAD token), and :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concatenate the tokens along the last dimension
into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which the gradient is backpropagated. Must have the
same shape and type as :attr:`fw_tensor`.
Returns:
A tensor with the same shape and value as :attr:`fw_tensor`, but which
directs the gradient to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which always truncates the longer sequence
one token at a time. This makes more sense than truncating an equal
percentage of tokens from each, since if one sequence is very short then
each token that is truncated likely contains more information than a
token from the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
get_instance_with_redundant_kwargs
|
Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_path`.
TypeError: If class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
# MASKED: get_instance_with_redundant_kwargs function (lines 337-372)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The results returned by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
- Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
:func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concatenate the tokens along the last dimension
into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which the gradient is backpropagated. Must have
the same shape and type as :attr:`fw_tensor`.
Returns:
A tensor with the same shape and value as :attr:`fw_tensor`, but whose
gradient is directed to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
each token that's truncated likely contains more information than a
token from the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
| 337 | 372 |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
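# A minimal usage sketch for `get_default_arg_values`; the sample function
# below is defined only for illustration:
#
#     >>> def make_cell(num_units, dropout=0.1, reuse=False):
#     ...     pass
#     >>> get_default_arg_values(make_cell)
#     >>> # -> {'dropout': 0.1, 'reuse': False}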
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_path`.
TypeError: If class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
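# A usage sketch for `get_instance`; `Foo` is defined here only for
# illustration (the function also accepts a class object directly):
#
#     >>> class Foo(object):
#     ...     def __init__(self, a, b=1):
#     ...         self.a, self.b = a, b
#     >>> foo = get_instance(Foo, {'a': 10, 'b': 2})
#     >>> # -> a Foo instance with foo.a == 10 and foo.b == 2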
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
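# A sketch contrasting this with `get_instance`: constructor arguments that
# the class does not accept are silently dropped instead of raising an
# error. The module path `mymodule` and class `Foo` are hypothetical:
#
#     >>> foo = get_instance_with_redundant_kwargs(
#     ...     'Foo', {'a': 10, 'b': 2, 'not_an_arg': None},
#     ...     module_paths=['mymodule'])
#     >>> # -> equivalent to mymodule.Foo(a=10, b=2); 'not_an_arg' is ignored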
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The results returned by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
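# A minimal sketch; `scale` below is defined only for illustration:
#
#     >>> def scale(x, factor=2):
#     ...     return x * factor
#     >>> call_function_with_redundant_kwargs(
#     ...     scale, {'x': 3, 'factor': 10, 'unused_key': 'ignored'})
#     >>> # -> 30; 'unused_key' is silently dropped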
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
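# A small sketch of the resulting structure (argument values are
# illustrative only):
#
#     >>> get_instance_kwargs({'num_units': 64}, {'dropout_rate': 0.1})
#     >>> # -> {'hparams': {'dropout_rate': 0.1}, 'num_units': 64}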
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
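# A quick sketch of the patching behavior (the dicts are illustrative);
# note that existing values in `tgt_dict` are never overwritten:
#
#     >>> tgt = {'optimizer': {'type': 'Adam'}}
#     >>> src = {'optimizer': {'type': 'SGD', 'lr': 0.1}, 'epochs': 5}
#     >>> dict_patch(tgt, src)
#     >>> # -> {'optimizer': {'type': 'Adam', 'lr': 0.1}, 'epochs': 5}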
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
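# A small sketch (the mapping is illustrative); the result is a numpy array
# of the same shape as `keys`:
#
#     >>> vocab_map = {'a': 0, 'b': 1}
#     >>> dict_lookup(vocab_map, [['a', 'b'], ['b', 'c']], default=-1)
#     >>> # -> numpy array [[0, 1], [1, -1]]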
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
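# A short sketch (the dict is illustrative):
#
#     >>> d = {'lr': 0.1, 'momentum': 0.9}
#     >>> dict_pop(d, ['momentum', 'nesterov'], default=False)
#     >>> # -> {'momentum': 0.9, 'nesterov': False}; d is now {'lr': 0.1}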
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
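# A brief sketch of the flattening (the nested dict is illustrative):
#
#     >>> flatten_dict({'opt': {'type': 'Adam', 'kwargs': {'lr': 0.001}}})
#     >>> # -> {'opt.type': 'Adam', 'opt.kwargs.lr': 0.001}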
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
- Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
:func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
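# A short sketch combining the three stripping steps above:
#
#     >>> strip_special_tokens('<BOS> a sentence <EOS> <PAD> <PAD>')
#     >>> # -> 'a sentence'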
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
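# A tiny sketch of joining nested token lists:
#
#     >>> str_join([['a', 'sentence'], ['another', 'one']])
#     >>> # -> ['a sentence', 'another one']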
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concatenate the tokens along the last dimension
into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
            same shape and type as :attr:`fw_tensor`.
Returns:
        A tensor with the same shape and value as :attr:`fw_tensor`, but whose
        gradient is directed to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
    This is a simple heuristic that always truncates the longer sequence,
    one token at a time. This makes more sense than truncating an equal
    percentage of tokens from each, since if one sequence is very short,
    each of its truncated tokens likely carries more information than a
    token from the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
get_function
|
Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
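    Example (an illustrative sketch, not from the original docs; the dotted
    paths below are examples and assume TensorFlow is importable):
        .. code-block:: python
            relu = get_function('tensorflow.nn.relu')
            relu = get_function('relu', module_paths=['tensorflow.nn'])
            # A callable passed in is returned unchanged.
            assert get_function(relu) is relu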
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
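# Illustrative usage sketch (the `_example_*` helper below is hypothetical and
# not part of the original module). It shows what `get_args` and
# `get_default_arg_values` return for a plain Python function.
def _example_get_args():
    def f(a, b=1, c='x'):
        return a, b, c
    assert get_args(f) == ['a', 'b', 'c']
    assert get_default_arg_values(f) == {'b': 1, 'c': 'x'}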
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
        module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
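# Illustrative usage sketch (hypothetical helper, not part of the original
# API): `get_class` first tries the full dotted path, then prefixes each
# candidate module path in order and returns the first match.
def _example_get_class():
    assert get_class('collections.OrderedDict') is collections.OrderedDict
    assert get_class('OrderedDict',
                     module_paths=['collections']) is collections.OrderedDict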
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
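# Illustrative usage sketch (hypothetical class and helper, not part of the
# original module): `get_instance` validates kwargs against the constructor
# signature before instantiating.
def _example_get_instance():
    class _Linear(object):
        def __init__(self, in_dim, out_dim, bias=True):
            self.in_dim, self.out_dim, self.bias = in_dim, out_dim, bias
    layer = get_instance(_Linear, {'in_dim': 4, 'out_dim': 2})
    assert (layer.in_dim, layer.out_dim, layer.bias) == (4, 2, True)
    # Passing a key not accepted by __init__ (e.g. 'dropout') raises ValueError.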
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
# MASKED: get_function function (lines 375-408)
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
        The results returned by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
        fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
    arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
        hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
            '`hparams` must be a dict, an instance of HParams, or `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
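# Illustrative usage sketch (hypothetical helper): `dict_lookup` vectorizes a
# plain dict lookup over (possibly nested) keys, falling back to `default`.
def _example_dict_lookup():
    token2id = {'a': 0, 'b': 1}
    ids = dict_lookup(token2id, [['a', 'b'], ['b', 'oov']], default=-1)
    assert ids.tolist() == [[0, 1], [1, -1]]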
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
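# Illustrative usage sketch (hypothetical helper): `dict_pop` removes the given
# keys in place and returns them as a new dict, using `default` for misses.
def _example_dict_pop():
    hparams = {'lr': 0.1, 'batch_size': 32, 'name': 'model'}
    popped = dict_pop(hparams, ['lr', 'momentum'])
    assert popped == {'lr': 0.1, 'momentum': None}
    assert hparams == {'batch_size': 32, 'name': 'model'}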
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
    - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
    This is a joint function of :func:`strip_eos`, :func:`strip_token` (for
    stripping PAD), and :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
        Strings of the same shape as :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
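# Illustrative usage sketch (hypothetical helper): BOS/EOS/PAD markers are
# removed in one call; everything after the EOS token is dropped as well.
def _example_strip_special_tokens():
    s = '<BOS> a sentence <EOS> <PAD> <PAD>'
    assert strip_special_tokens(s) == 'a sentence'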
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concatenate the tokens along the last
            dimension into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
            same shape and type as :attr:`fw_tensor`.
Returns:
        A tensor with the same shape and value as :attr:`fw_tensor`, but whose
        gradient is directed to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
    This is a simple heuristic that always truncates the longer sequence,
    one token at a time. This makes more sense than truncating an equal
    percentage of tokens from each, since if one sequence is very short,
    each of its truncated tokens likely carries more information than a
    token from the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
| 375 | 408 |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
        module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
        The results returned by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
        fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
    arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
        hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
            '`hparams` must be a dict, an instance of HParams, or `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
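# Illustrative usage sketch (hypothetical helper): values already present in
# the target dict win; only missing entries are copied over, recursively.
def _example_dict_patch():
    tgt = {'optimizer': {'type': 'Adam'}}
    src = {'optimizer': {'type': 'SGD', 'lr': 0.1}, 'max_epochs': 5}
    assert dict_patch(tgt, src) == \
        {'optimizer': {'type': 'Adam', 'lr': 0.1}, 'max_epochs': 5}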
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
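# Illustrative usage sketch (hypothetical helper): nested keys are joined with
# `sep` into a single flat namespace.
def _example_flatten_dict():
    nested = {'train': {'optimizer': {'lr': 0.1}}, 'name': 'model'}
    assert flatten_dict(nested) == {'train.optimizer.lr': 0.1, 'name': 'model'}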
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
    - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
    This is a joint function of :func:`strip_eos`, :func:`strip_token` (for
    stripping PAD), and :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
        Strings of the same shape as :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concatenate the tokens along the last
            dimension into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
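# A minimal usage sketch (hypothetical values, for illustration only): computing
# how many batches are needed to cover a dataset.
def _example_ceildiv():
    num_examples, batch_size = 103, 10
    return ceildiv(num_examples, batch_size)  # -> 11, since 103 / 10 = 10.3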
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type as :attr:`fw_tensor`.
Returns:
A tensor with the same shape and value as :attr:`fw_tensor`, but which
directs gradient to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
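# A minimal usage sketch (hypothetical `_example_straight_through` helper): a common
# pattern is to apply a non-differentiable op (here `tf.round`) in the forward pass
# while routing gradients to the continuous tensor in the backward pass.
def _example_straight_through(logits):
    probs = tf.sigmoid(logits)
    hard = tf.cast(tf.round(probs), probs.dtype)
    # Forward value equals `hard`; gradients flow to `probs`.
    return straight_through(hard, probs)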
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
each token that's truncated likely contains more information than a
longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
call_function_with_redundant_kwargs
|
Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
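# A minimal usage sketch (hypothetical `my_fn`, for illustration only) of
# `get_args` and `get_default_arg_values`.
def _example_get_args():
    def my_fn(x, y, scale=2.0, bias=0.0):
        return x * scale + y + bias
    args = get_args(my_fn)                    # -> ['x', 'y', 'scale', 'bias']
    defaults = get_default_arg_values(my_fn)  # -> {'scale': 2.0, 'bias': 0.0}
    return args, defaults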
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
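# A minimal usage sketch (standard-library class used purely for illustration):
# a class can be located by its full path, or by name plus candidate modules.
def _example_get_class():
    cls_by_path = get_class('collections.OrderedDict')
    cls_by_search = get_class('OrderedDict', module_paths=['collections'])
    return cls_by_path is cls_by_search  # -> True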
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
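# A minimal usage sketch (hypothetical `_ExampleModel` class): every key in `kwargs`
# must match the constructor signature, otherwise a ValueError is raised.
class _ExampleModel(object):
    def __init__(self, size, activation='relu'):
        self.size = size
        self.activation = activation
def _example_get_instance():
    model = get_instance(_ExampleModel, {'size': 16})
    # get_instance(_ExampleModel, {'sizes': 16}) would raise ValueError.
    return model.size, model.activation  # -> (16, 'relu')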
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
# MASKED: call_function_with_redundant_kwargs function (lines 411-440)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
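# A minimal usage sketch (hypothetical hyperparameter values): merging user kwargs
# with `hparams` into a single constructor-argument dict.
def _example_get_instance_kwargs():
    hparams = {'num_units': 64, 'dropout': 0.1}
    kwargs_ = get_instance_kwargs({'name': 'encoder'}, hparams)
    # kwargs_ == {'hparams': {'num_units': 64, 'dropout': 0.1}, 'name': 'encoder'}
    return kwargs_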
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
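# A minimal usage sketch (hypothetical config values): defaults are filled in
# recursively without overwriting entries already present in the target dict.
def _example_dict_patch():
    user = {'cell': {'type': 'LSTMCell'}}
    defaults = {'cell': {'type': 'GRUCell', 'num_units': 256}, 'dropout': 0.0}
    patched = dict_patch(user, defaults)
    # patched == {'cell': {'type': 'LSTMCell', 'num_units': 256}, 'dropout': 0.0}
    return patched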
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
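# A minimal usage sketch (hypothetical vocabulary): vectorized lookup over a nested
# list of keys, with a default for out-of-vocabulary entries.
def _example_dict_lookup():
    word2id = {'a': 0, 'sentence': 1}
    ids = dict_lookup(word2id, [['a', 'sentence'], ['a', 'unknown']], default=-1)
    # ids is a numpy array: [[0, 1], [0, -1]]
    return ids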
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
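# A minimal usage sketch (hypothetical metrics dict): nested keys are flattened into
# dot-separated names, which is convenient e.g. for logging.
def _example_flatten_dict():
    metrics = {'loss': {'train': 0.5, 'eval': 0.7}, 'step': 100}
    flat = flatten_dict(metrics)
    # flat == {'loss.train': 0.5, 'loss.eval': 0.7, 'step': 100}
    return flat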
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
- Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
:func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape as :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
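# A minimal usage sketch (hypothetical decoded string): BOS, EOS and PAD tokens are
# removed in a single call.
def _example_strip_special_tokens():
    decoded = '<BOS> a sentence <EOS> <PAD> <PAD>'
    return strip_special_tokens(decoded)  # -> 'a sentence'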
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concat the tokens along the last dimension
into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type as :attr:`fw_tensor`.
Returns:
A tensor with the same shape and value as :attr:`fw_tensor`, but which
directs gradient to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
each token that's truncated likely contains more information than a
longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
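# A minimal usage sketch (hypothetical `build` callable and config): extra keys in
# the config are silently dropped, so one shared dict can drive callables with
# different signatures.
def _example_call_with_redundant_kwargs():
    def build(units, activation='tanh'):
        return units, activation
    config = {'units': 32, 'activation': 'relu', 'learning_rate': 1e-3}
    return call_function_with_redundant_kwargs(build, config)  # -> (32, 'relu')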
| 411 | 440 |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
- Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
:func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape as :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concat the tokens along the last dimension
            into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
            same shape and type as :attr:`fw_tensor`.
Returns:
        A tensor with the same shape and value as :attr:`fw_tensor`, but whose
        gradient is directed to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
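# Illustrative sketch (not part of the original Texar source): the classic
# straight-through pattern, where hard rounding is used in the forward pass
# while gradients flow through the underlying soft probabilities.
def _example_straight_through(soft_probs):
    hard = tf.round(soft_probs)                 # forward value is 0/1
    return straight_through(hard, soft_probs)   # gradient goes to soft_probs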
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
each token that's truncated likely contains more information than a
longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
get_instance_kwargs
|
Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
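# Illustrative sketch (not part of the original Texar source); `_demo_fn` is a
# hypothetical function used only to show what the two inspectors return.
def _demo_fn(a, b=1, c='x'):
    return a, b, c
# get_args(_demo_fn)               == ['a', 'b', 'c']
# get_default_arg_values(_demo_fn) == {'b': 1, 'c': 'x'}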
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_paths (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
        classtype (optional): A (list of) classes of which the instance must
            be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
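# Illustrative sketch (not part of the original Texar source); `_DemoModule` is
# a hypothetical class showing how constructor kwargs are validated.
class _DemoModule(object):
    def __init__(self, scale=1.0):
        self.scale = scale
# get_instance(_DemoModule, {'scale': 0.1}).scale  == 0.1
# get_instance(_DemoModule, {'bad_arg': 0})        -> ValueError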
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
        fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
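# Illustrative sketch (not part of the original Texar source): keys that are not
# in the callable's signature are silently dropped before the call.
def _example_call_with_redundant_kwargs():
    def _scale(x, factor=2):
        return x * factor
    return call_function_with_redundant_kwargs(
        _scale, {'x': 3, 'factor': 4, 'unused': 'ignored'})  # -> 12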
# MASKED: get_instance_kwargs function (lines 443-467)
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
    Returns:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
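# Illustrative sketch (not part of the original Texar source): missing entries
# are filled in recursively without overwriting values already in `tgt`.
def _example_dict_patch():
    tgt = {'optimizer': {'type': 'Adam'}}
    src = {'optimizer': {'type': 'SGD', 'kwargs': {'lr': 1e-3}}, 'seed': 0}
    dict_patch(tgt, src)
    # tgt == {'optimizer': {'type': 'Adam', 'kwargs': {'lr': 1e-3}}, 'seed': 0}
    return tgt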
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
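# Illustrative sketch (not part of the original Texar source): vectorized lookup
# over a nested list of keys, with a default for out-of-vocabulary entries.
def _example_dict_lookup():
    vocab = {'a': 0, 'b': 1}
    return dict_lookup(vocab, [['a', 'b'], ['b', 'c']], default=-1)
    # -> np.array([[0, 1], [1, -1]])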
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
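# Illustrative sketch (not part of the original Texar source): only keys present
# in the source dict make it into the fetched sub-dict.
def _example_dict_fetch():
    src = {'lr': 1e-3, 'dropout': 0.1, 'name': 'demo'}
    return dict_fetch(src, ['lr', 'momentum'])
    # -> {'lr': 1e-3}  ('momentum' is absent from src, so it is skipped)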
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
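# Illustrative sketch (not part of the original Texar source); relies on
# `collections.MutableMapping`, which is available in the Python versions this
# TF1-era module targets.
def _example_flatten_dict():
    nested = {'train': {'batch_size': 32, 'data': {'shuffle': True}}}
    return flatten_dict(nested)
    # -> {'train.batch_size': 32, 'train.data.shuffle': True}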
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
    - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
    This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
    :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
        Strings of the same shape as :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concat the tokens along the last dimension
            into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type with :attr:`fw_tensor`.
Returns:
        A tensor with the same shape and value as :attr:`fw_tensor`, but whose
        gradient is directed to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
each token that's truncated likely contains more information than a
longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
    arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
        hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
            '`hparams` must be a dict, an instance of HParams, or `None`.')
kwargs_.update(kwargs or {})
return kwargs_
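# Illustrative usage sketch (not part of the original implementation); the
# kwargs and hparams values shown are hypothetical.
def _example_get_instance_kwargs():
    kwargs_ = get_instance_kwargs({'mode': 'train'}, {'num_layers': 2})
    # kwargs_ == {'hparams': {'num_layers': 2}, 'mode': 'train'}
    return kwargs_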
| 443 | 467 |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_paths (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
        classtype (optional): A (list of) classes of which the instance must
            be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
        fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
    arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
        hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
            '`hparams` must be a dict, an instance of HParams, or `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
    Returns:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
    - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
    This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
    :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
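# Editor's note: the block below is an illustrative sketch, not part of the
# original Texar module. `_demo_strip_special_tokens` is a hypothetical helper
# that is never called automatically; it only shows how the default special
# tokens are removed in sequence (EOS first, then PAD, then BOS).
def _demo_strip_special_tokens():
    """Shows joint stripping with the default '<PAD>'/'<BOS>'/'<EOS>' tokens."""
    text = '<BOS> a sentence <EOS> <PAD> <PAD>'
    return strip_special_tokens(text)  # -> 'a sentence'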
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
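# Editor's note: illustrative sketch only, not part of the original module.
# `_demo_str_join` is a hypothetical helper showing how a nested token list
# is concatenated along its last dimension.
def _demo_str_join():
    tokens = [['a', 'sentence'], ['with', 'nested', 'tokens']]
    # Each innermost token list becomes one space-separated string.
    return str_join(tokens)  # -> ['a sentence', 'with nested tokens']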
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concatenate the tokens along the last
dimension into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type as :attr:`fw_tensor`.
Returns:
A tensor with the same shape and value as :attr:`fw_tensor`, but whose
gradient is directed to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
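# Editor's note: illustrative sketch only, not part of the original module.
# `_demo_straight_through` is a hypothetical helper showing the common
# straight-through pattern: the forward pass uses a hard one-hot sample while
# gradients flow through the soft softmax probabilities. `logits` is assumed
# to be a float Tensor of shape `[batch_size, num_classes]`.
def _demo_straight_through(logits):
    probs = tf.nn.softmax(logits)
    hard = tf.one_hot(tf.argmax(probs, axis=-1), tf.shape(probs)[-1],
                      dtype=probs.dtype)
    # Forward value equals `hard`; the gradient is taken w.r.t. `probs`.
    return straight_through(hard, probs)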
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percentage of tokens from each, since if one sequence is very short then
each truncated token likely carries more information than a token from
the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
strip_token
|
Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
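# Editor's note: illustrative sketch only, not part of the original module.
# `_demo_get_args` is a hypothetical helper listing the argument names of a
# locally defined function.
def _demo_get_args():
    def foo(x, y, z=1):
        return x + y + z
    return get_args(foo)  # -> ['x', 'y', 'z']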
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
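# Editor's note: illustrative sketch only, not part of the original module.
# `_demo_get_default_arg_values` is a hypothetical helper showing that only
# arguments with default values appear in the result.
def _demo_get_default_arg_values():
    def foo(x, y=2, z=3):
        return x + y + z
    return get_default_arg_values(foo)  # -> {'y': 2, 'z': 3}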
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_path`.
TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
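# Editor's note: illustrative sketch only, not part of the original module.
# `_demo_get_instance` and the `_Dummy` class are hypothetical; the sketch
# shows that only declared constructor arguments are accepted, and that an
# unknown key would raise a ValueError.
def _demo_get_instance():
    class _Dummy(object):
        def __init__(self, size, name='dummy'):
            self.size = size
            self.name = name
    obj = get_instance(_Dummy, {'size': 8})
    return obj.size, obj.name  # -> (8, 'dummy')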
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
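# Editor's note: illustrative sketch only, not part of the original module.
# `_demo_call_function_with_redundant_kwargs` is a hypothetical helper showing
# that keyword arguments not accepted by the callable are silently dropped.
def _demo_call_function_with_redundant_kwargs():
    def add(a, b):
        return a + b
    # 'c' is not an argument of `add`, so it is ignored.
    return call_function_with_redundant_kwargs(add, {'a': 1, 'b': 2, 'c': 3})  # -> 3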
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
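# Editor's note: illustrative sketch only, not part of the original module.
# `_demo_get_instance_kwargs` is a hypothetical helper showing how `hparams`
# is merged with the remaining keyword arguments.
def _demo_get_instance_kwargs():
    return get_instance_kwargs({'inputs': 'x'}, {'dim': 128})
    # -> {'hparams': {'dim': 128}, 'inputs': 'x'}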
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
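# Editor's note: illustrative sketch only, not part of the original module.
# `_demo_dict_lookup` is a hypothetical helper mapping a nested list of keys
# to values, with a default for unknown keys.
def _demo_dict_lookup():
    vocab = {'a': 0, 'b': 1}
    keys = [['a', 'b'], ['b', 'c']]
    # 'c' is not in the dict and falls back to -1.
    return dict_lookup(vocab, keys, default=-1)  # -> array([[0, 1], [1, -1]])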
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
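# Editor's note: illustrative sketch only, not part of the original module.
# `_demo_flatten_dict` is a hypothetical helper flattening a nested dict into
# dot-separated keys.
def _demo_flatten_dict():
    nested = {'model': {'dim': 256, 'cell': {'type': 'LSTM'}}}
    return flatten_dict(nested)
    # -> {'model.dim': 256, 'model.cell.type': 'LSTM'}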
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
# MASKED: strip_token function (lines 655-714)
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
- Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
:func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concatenate the tokens along the last
dimension into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type as :attr:`fw_tensor`.
Returns:
A tensor with the same shape and value as :attr:`fw_tensor`, but whose
gradient is directed to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percentage of tokens from each, since if one sequence is very short then
each truncated token likely carries more information than a token from
the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
| 655 | 714 |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_path`.
TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
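# Editor's note: illustrative sketch only, not part of the original module.
# `_demo_get_class` is a hypothetical helper resolving a class by short name
# via candidate module paths; the standard-library `collections` module is
# used purely for illustration.
def _demo_get_class():
    cls = get_class('OrderedDict', module_paths=['collections'])
    return cls is collections.OrderedDict  # -> True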
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
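# Editor's note: illustrative sketch only, not part of the original module.
# `_demo_dict_patch` is a hypothetical helper showing how missing entries are
# filled in recursively while existing values are preserved.
def _demo_dict_patch():
    defaults = {'cell': {'type': 'LSTM', 'dim': 256}, 'dropout': 0.1}
    user = {'cell': {'dim': 512}}
    return dict_patch(user, defaults)
    # -> {'cell': {'dim': 512, 'type': 'LSTM'}, 'dropout': 0.1}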
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
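# Editor's note: illustrative sketch only, not part of the original module.
# `_demo_dict_fetch` is a hypothetical helper showing that only requested keys
# present in the source dict are returned.
def _demo_dict_fetch():
    src = {'lr': 0.001, 'batch_size': 64, 'name': 'adam'}
    return dict_fetch(src, ['lr', 'momentum'])  # -> {'lr': 0.001}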
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
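# Editor's note: illustrative sketch only, not part of the original module.
# `_demo_dict_pop` is a hypothetical helper showing that popped keys are
# removed from the dict and missing keys map to the default.
def _demo_dict_pop():
    d = {'a': 1, 'b': 2}
    removed = dict_pop(d, ['a', 'c'])
    return removed, d  # -> ({'a': 1, 'c': None}, {'b': 2})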
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
    - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
    This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
    :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
            contain tokens separated with a space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
        Strings of the same shape as :attr:`str_`, with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
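# A minimal usage sketch (not part of the original Texar source): it shows how
# `strip_special_tokens` combines the EOS/PAD/BOS stripping above, for both
# string and token-list inputs. `_example_strip_special_tokens` is hypothetical.
def _example_strip_special_tokens():
    s = '<BOS> a sentence <EOS> <PAD> <PAD>'
    assert strip_special_tokens(s) == 'a sentence'
    # Token-list input: each sentence is a list of tokens instead of a string.
    tokens = [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>']]
    assert strip_special_tokens(tokens, is_token_list=True) == [['a', 'sentence']]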
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
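# A minimal usage sketch (not part of the original Texar source): `str_join`
# collapses the last (token) dimension into strings. `_example_str_join` is a
# hypothetical helper added only for illustration.
def _example_str_join():
    tokens = [['a', 'sentence'], ['another', 'one']]
    assert str_join(tokens) == ['a sentence', 'another one']
    assert str_join(tokens, sep='_') == ['a_sentence', 'another_one']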
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concatenate the tokens along the last
            dimension into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
        bw_tensor: A tensor to which gradient is backpropagated. Must have the
            same shape and type as :attr:`fw_tensor`.
    Returns:
        A tensor with the same shape and value as :attr:`fw_tensor`, but which
        directs gradients to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
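# A minimal usage sketch (not part of the original Texar source) of the usual
# straight-through pattern: forward a hard, rounded value while gradients flow
# through the soft value. `soft` is a hypothetical probability tensor.
def _example_straight_through():
    soft = tf.constant([0.2, 0.8])
    hard = tf.round(soft)               # forward value: [0., 1.]
    out = straight_through(hard, soft)  # gradients w.r.t. `soft` pass through
    return out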
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
    each token that's truncated likely contains more information than a
    token from the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
strip_eos
|
Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
            contain tokens separated with a space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
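# A minimal usage sketch (not part of the original Texar source):
# `get_default_arg_values` reports only parameters that carry defaults.
# `_example_fn` is a hypothetical function used purely for illustration.
def _example_get_default_arg_values():
    def _example_fn(a, b=1, c='x'):
        return a, b, c
    assert get_default_arg_values(_example_fn) == {'b': 1, 'c': 'x'}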
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
        module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
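# A minimal usage sketch (not part of the original Texar source): it resolves a
# standard-library class to show the two lookup paths of `get_class`, assuming
# only the `collections` import at the top of this module.
def _example_get_class():
    # Resolved via the candidate module list.
    assert get_class('OrderedDict', ['collections']) is collections.OrderedDict
    # Resolved directly from a full dotted path.
    assert get_class('collections.OrderedDict') is collections.OrderedDict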
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
        classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
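# A minimal usage sketch (not part of the original Texar source): `_Demo` is a
# hypothetical class defined only to show the constructor-kwargs validation
# performed by `get_instance`.
def _example_get_instance():
    class _Demo(object):
        def __init__(self, size, name='demo'):
            self.size = size
            self.name = name
    obj = get_instance(_Demo, {'size': 3})
    assert obj.size == 3 and obj.name == 'demo'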
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
        fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
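# A minimal usage sketch (not part of the original Texar source): redundant
# keyword arguments are silently dropped before the call. `_add` is a
# hypothetical function used purely for illustration.
def _example_call_function_with_redundant_kwargs():
    def _add(a, b=0):
        return a + b
    # 'c' is not an argument of `_add` and is ignored.
    assert call_function_with_redundant_kwargs(_add, {'a': 1, 'b': 2, 'c': 3}) == 3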
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
    arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
        hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
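# A minimal usage sketch (not part of the original Texar source): existing
# leaves in the target dict are kept, only missing keys are filled in from the
# source dict (recursively for nested dicts).
def _example_dict_patch():
    tgt = {'a': 1, 'nested': {'x': 1}}
    src = {'a': 100, 'b': 2, 'nested': {'x': 100, 'y': 2}}
    assert dict_patch(tgt, src) == {'a': 1, 'b': 2, 'nested': {'x': 1, 'y': 2}}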
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
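# A minimal usage sketch (not part of the original Texar source): unknown keys
# fall back to `default`, and the result is a numpy array shaped like `keys`.
def _example_dict_lookup():
    vocab = {'a': 0, 'b': 1}
    ids = dict_lookup(vocab, [['a', 'b'], ['b', 'c']], default=-1)
    assert ids.tolist() == [[0, 1], [1, -1]]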
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
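# A minimal usage sketch (not part of the original Texar source): keys missing
# from the source dict are simply skipped.
def _example_dict_fetch():
    src = {'lr': 0.1, 'dropout': 0.5, 'name': 'demo'}
    assert dict_fetch(src, ['lr', 'name', 'missing']) == {'lr': 0.1, 'name': 'demo'}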
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
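# A minimal usage sketch (not part of the original Texar source), assuming a
# Python version where `collections.MutableMapping` (used above) is still
# available, i.e. before Python 3.10.
def _example_flatten_dict():
    nested = {'optimizer': {'type': 'Adam', 'kwargs': {'lr': 1e-3}}}
    assert flatten_dict(nested) == {'optimizer.type': 'Adam',
                                    'optimizer.kwargs.lr': 1e-3}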
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
            contain tokens separated with a space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
# MASKED: strip_eos function (lines 717-761)
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
            contain tokens separated with a space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
    - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
    This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
    :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
            contain tokens separated with a space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
        Strings of the same shape as :attr:`str_`, with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concatenate the tokens along the last
            dimension into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
        bw_tensor: A tensor to which gradient is backpropagated. Must have the
            same shape and type as :attr:`fw_tensor`.
    Returns:
        A tensor with the same shape and value as :attr:`fw_tensor`, but which
        directs gradients to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
    each token that's truncated likely contains more information than a
    token from the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
            contain tokens separated with a space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
| 717 | 761 |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
        module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
        classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
        fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
    arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
        hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
            contain tokens separated with a space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
            contain tokens separated with a space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
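# Illustrative usage sketch (the batch below is made up, not part of the
# original module): the EOS token and everything after it is dropped.
def _strip_eos_example():
    """Returns ['a sentence', 'another one'] for the made-up batch below."""
    return strip_eos(['a sentence <EOS> <PAD>', 'another one <EOS>'])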
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
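# Illustrative usage sketch (made-up token lists, not part of the original
# module): with `is_token_list=True`, the leading BOS token is removed and
# the token-list structure is preserved.
def _strip_bos_example():
    """Returns [['a', 'sentence']] for the made-up input below."""
    return strip_bos([['<BOS>', 'a', 'sentence']], is_token_list=True)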
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
- Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
:func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape as :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
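# Illustrative usage sketch (made-up sentence, not part of the original
# module): BOS, EOS, and PAD are stripped in a single call.
def _strip_special_tokens_example():
    """Returns 'a sentence' for the made-up input below."""
    return strip_special_tokens('<BOS> a sentence <EOS> <PAD> <PAD>')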
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
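# Illustrative usage sketch (made-up tokens, not part of the original
# module): joining collapses the last dimension into whitespace-separated
# sentences.
def _str_join_example():
    """Returns ['a sentence', 'another one'] for the nested lists below."""
    return str_join([['a', 'sentence'], ['another', 'one']])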
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concat along the last dimension of the
tokens into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type as :attr:`fw_tensor`.
Returns:
A tensor with the same shape and value as :attr:`fw_tensor`, but whose
gradient is directed to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
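# Illustrative sketch (hypothetical usage, not part of the original module):
# a straight-through estimator that outputs hard 0/1 values in the forward
# pass while routing gradients to the underlying soft probabilities.
def _straight_through_round(probs):
    """Forward value is `tf.round(probs)`; gradients flow to `probs`."""
    return straight_through(tf.round(probs), probs)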
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic that always truncates the longer sequence
one token at a time. This makes more sense than truncating an equal
percentage of tokens from each, since if one sequence is very short,
each truncated token likely carries more information than a token from
the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
strip_bos
|
Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
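# Illustrative sketch (made-up local function, not part of the original
# module): how the two inspection helpers relate.
def _inspect_example():
    """Returns (['x', 'scale'], {'scale': 2.0}) for the function below."""
    def affine(x, scale=2.0):
        return x * scale
    return get_args(affine), get_default_arg_values(affine)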
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_paths (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
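# Illustrative sketch (standard-library class used for demonstration only):
# the class name is resolved by searching the candidate module paths.
def _get_class_example():
    """Resolves 'OrderedDict' from the candidate module 'collections'."""
    return get_class('OrderedDict', module_paths=['collections'])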
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
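# Illustrative sketch (the `_Sketch` class below is hypothetical, not part of
# texar): constructor kwargs are validated before instantiation.
class _Sketch(object):
    def __init__(self, dim=8):
        self.dim = dim

def _get_instance_example():
    """Returns a `_Sketch` with dim=16; an unknown kwarg would raise."""
    return get_instance(_Sketch, {'dim': 16})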
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
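# Illustrative sketch (made-up function, not part of the original module):
# keys that the callable does not accept are silently dropped.
def _call_redundant_example():
    """Returns 3; the unused 'bias' key is ignored."""
    def add(x, y):
        return x + y
    return call_function_with_redundant_kwargs(
        add, {'x': 1, 'y': 2, 'bias': 7})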
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
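# Illustrative sketch (made-up values): the merged kwargs for constructing a
# hypothetical module.
def _get_instance_kwargs_example():
    """Returns {'hparams': {'dim': 8}, 'vocab_size': 100}."""
    return get_instance_kwargs({'vocab_size': 100}, {'dim': 8})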
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
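# Illustrative sketch (made-up dicts): existing keys win, missing keys are
# filled in recursively.
def _dict_patch_example():
    """Returns {'a': 1, 'b': {'c': 2, 'd': 3}} for the dicts below."""
    tgt = {'a': 1, 'b': {'c': 2}}
    src = {'a': 10, 'b': {'c': 20, 'd': 3}}
    return dict_patch(tgt, src)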
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
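# Illustrative sketch (made-up vocabulary): keys are looked up element-wise
# over a nested structure, with a fallback for unknown keys.
def _dict_lookup_example():
    """Returns numpy.array([[1, 2], [0, 1]]) for the nested keys below."""
    return dict_lookup(
        {'a': 1, 'b': 2}, [['a', 'b'], ['unk', 'a']], default=0)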
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
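# Illustrative sketch (made-up hyperparameters): only keys present in the
# source dict are returned.
def _dict_fetch_example():
    """Returns {'dim': 8}; 'dropout' is absent from the source dict."""
    return dict_fetch({'dim': 8, 'name': 'enc'}, ['dim', 'dropout'])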
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
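# Illustrative sketch (made-up config): nested keys are joined with the
# separator.
def _flatten_dict_example():
    """Returns {'opt.lr': 0.1, 'opt.beta': 0.9, 'name': 'adam'}."""
    return flatten_dict({'opt': {'lr': 0.1, 'beta': 0.9}, 'name': 'adam'})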
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
# MASKED: strip_bos function (lines 767-813)
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
- Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
:func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape as :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concat along the last dimension of the
tokens into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type as :attr:`fw_tensor`.
Returns:
A tensor with the same shape and value as :attr:`fw_tensor`, but whose
gradient is directed to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic that always truncates the longer sequence
one token at a time. This makes more sense than truncating an equal
percentage of tokens from each, since if one sequence is very short,
each truncated token likely carries more information than a token from
the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
| 767 | 813 |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_paths (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
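# Illustrative sketch (uses the `HParams` class imported above for
# demonstration only): the name is resolved and the subclass check applied.
def _check_or_get_class_example():
    """Resolves 'HParams' and verifies it is a subclass of `object`."""
    return check_or_get_class(
        'HParams', module_path=['texar.tf.hyperparams'], superclass=object)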
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
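# Illustrative sketch (uses `numpy.mean` for demonstration only): the name is
# resolved by searching the candidate module paths.
def _get_function_example():
    """Resolves 'mean' to `numpy.mean` via the candidate module list."""
    return get_function('mean', module_paths=['numpy'])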
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
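# Illustrative sketch (made-up hyperparameters): popped keys are removed from
# the dict in place and returned, with a default for missing keys.
def _dict_pop_example():
    """Returns {'dropout': 0.1, 'bias': None}; 'dropout' is popped."""
    hparams = {'dim': 8, 'dropout': 0.1}
    return dict_pop(hparams, ['dropout', 'bias'])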
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
    - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
    This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
    :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concatenate the tokens along the last
            dimension into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
            same shape and type as :attr:`fw_tensor`.
Returns:
        A tensor with the same shape and value as :attr:`fw_tensor`, but whose
        gradient is directed to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
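# A minimal usage sketch for `straight_through`, written with TF1 graph-mode
# APIs (`tf.placeholder`, `tf.gradients`) to match the surrounding code; the
# shapes and ops below are illustrative assumptions.
def _example_straight_through():
    """The forward value is the rounded tensor; gradients bypass `tf.round`."""
    soft = tf.placeholder(tf.float32, shape=[2])
    hard = straight_through(tf.round(soft), soft)
    # d(sum(hard))/d(soft) is all ones, as if `tf.round` were the identity.
    grad = tf.gradients(tf.reduce_sum(hard), [soft])[0]
    return hard, grad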
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
    one token at a time. This makes more sense than truncating an equal
    percentage of tokens from each, since if one sequence is very short then
    each truncated token likely carries more information than a token from
    the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
str_join
|
Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
|
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
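# A minimal usage sketch for `get_args`; `foo` is a made-up function.
def _example_get_args():
    """Positional and keyword argument names are returned in order."""
    def foo(x, y, z=0):
        return x + y + z
    assert get_args(foo) == ['x', 'y', 'z']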
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
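# A minimal usage sketch for `get_default_arg_values`; `foo` is made up.
def _example_get_default_arg_values():
    """Only arguments that carry defaults appear in the returned dict."""
    def foo(x, y=1, z='a'):
        return x, y, z
    assert get_default_arg_values(foo) == {'y': 1, 'z': 'a'}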
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
        module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
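# A minimal usage sketch for `get_class`, using a standard-library class so it
# stays self-contained; Texar classes would be located the same way.
def _example_get_class():
    """A class is found by full path, or by name plus candidate modules."""
    by_path = get_class('collections.OrderedDict')
    by_name = get_class('OrderedDict', module_paths=['collections'])
    assert by_path is by_name is collections.OrderedDict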
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
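# A minimal usage sketch for `get_instance`; `Box` is a made-up class used
# only to show constructor-kwargs validation.
def _example_get_instance():
    """Instantiates a class (or class name) with validated keyword arguments."""
    class Box(object):
        def __init__(self, width, height=1):
            self.width = width
            self.height = height
    box = get_instance(Box, {'width': 3, 'height': 2})
    assert (box.width, box.height) == (3, 2)
    # Passing an unknown key such as 'depth' would raise ValueError instead.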
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
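# A minimal usage sketch for `get_function`, using built-in and NumPy names so
# it stays self-contained.
def _example_get_function():
    """Functions resolve by full path, or by name plus candidate modules."""
    assert get_function('len') is len
    assert get_function('concatenate', module_paths=['numpy']) is np.concatenate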
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
        fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
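# A minimal usage sketch for `call_function_with_redundant_kwargs`; `scale`
# and the extra key are made up.
def _example_call_function_with_redundant_kwargs():
    """Keyword arguments the callable does not accept are silently dropped."""
    def scale(x, factor=2):
        return x * factor
    result = call_function_with_redundant_kwargs(
        scale, {'x': 3, 'factor': 4, 'unused_key': 'ignored'})
    assert result == 12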
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
    arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
        hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
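# A minimal usage sketch for `get_instance_kwargs`; the kwargs and hparams
# values are arbitrary.
def _example_get_instance_kwargs():
    """User kwargs are merged with an `hparams` entry for module construction."""
    kwargs_ = get_instance_kwargs({'units': 128}, {'dropout': 0.1})
    assert kwargs_ == {'hparams': {'dropout': 0.1}, 'units': 128}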
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
    Returns:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
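# A minimal usage sketch for `dict_patch`; the config-like values are made up.
def _example_dict_patch():
    """Missing keys are added recursively; existing values are left untouched."""
    tgt = {'optimizer': {'type': 'Adam'}}
    src = {'optimizer': {'type': 'SGD', 'lr': 1e-3}, 'epochs': 10}
    patched = dict_patch(tgt, src)
    assert patched == {'optimizer': {'type': 'Adam', 'lr': 1e-3}, 'epochs': 10}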
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
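# A minimal usage sketch for `dict_lookup`; the toy vocabulary is made up.
def _example_dict_lookup():
    """Keys are looked up element-wise; missing keys fall back to `default`."""
    vocab = {'a': 0, 'b': 1}
    ids = dict_lookup(vocab, [['a', 'b'], ['b', 'c']], default=-1)
    assert ids.tolist() == [[0, 1], [1, -1]]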
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
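# A minimal usage sketch for `dict_fetch`; keys and values are arbitrary.
def _example_dict_fetch():
    """Only the requested keys that exist in the source dict are returned."""
    src = {'lr': 1e-3, 'batch_size': 32, 'name': 'model'}
    sub = dict_fetch(src, ['lr', 'batch_size', 'missing'])
    assert sub == {'lr': 1e-3, 'batch_size': 32}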
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
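# A minimal usage sketch for `dict_pop`; keys and values are arbitrary.
def _example_dict_pop():
    """Several keys are popped at once; missing keys return `default`."""
    d = {'a': 1, 'b': 2}
    removed = dict_pop(d, ['a', 'c'], default=0)
    assert removed == {'a': 1, 'c': 0} and d == {'b': 2}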
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
    - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
    This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
    :func:`strip_bos`.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
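# A minimal usage sketch for `strip_special_tokens`; the sentence is made up.
def _example_strip_special_tokens():
    """BOS/EOS/PAD markers are removed in a single call."""
    raw = '<BOS> a sentence <EOS> <PAD> <PAD>'
    assert strip_special_tokens(raw) == 'a sentence'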
# MASKED: str_join function (lines 883-910)
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concatenate the tokens along the last
            dimension into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
            same shape and type as :attr:`fw_tensor`.
Returns:
        A tensor with the same shape and value as :attr:`fw_tensor`, but whose
        gradient is directed to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
    one token at a time. This makes more sense than truncating an equal
    percentage of tokens from each, since if one sequence is very short then
    each truncated token likely carries more information than a token from
    the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
| 883 | 910 |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
        module_path (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
        fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
    arguments as well as an argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
        hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
    Returns:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
            contain tokens separated with a space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
    :attr:`str_` are separated with a whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
            contain tokens separated with a space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
    :attr:`str_` are separated with a whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
            contain tokens separated with a space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
    - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
This is a joint function of :func:`strip_eos`, :func:`strip_pad`, and
:func:`strip_bos`
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
            contain tokens separated with a space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
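# Illustrative sketch (added for clarity, not part of the original module):
# with the default special tokens, the EOS token (and everything after it),
# PAD tokens, and leading BOS tokens are removed in a single call.
#
#   strip_special_tokens('<BOS> a sentence <EOS> <PAD> <PAD>')
#   # -> 'a sentence'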
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
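# Illustrative sketch (added for clarity, not part of the original module):
# str_join only joins along the last dimension.
#
#   str_join([['a', 'b'], ['c']])   # -> ['a b', 'c']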
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
        join (bool): Whether to concat the tokens along the last dimension
            into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
        bw_tensor: A tensor to which the gradient is backpropagated. Must have
            the same shape and type as :attr:`fw_tensor`.
    Returns:
        A tensor with the same shape and value as :attr:`fw_tensor`, whose
        gradient is directed to :attr:`bw_tensor`.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
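# Illustrative sketch (added for clarity, not part of the original module): a
# typical straight-through estimator forwards a hard (discrete) value while
# backpropagating through the soft distribution; the tensor names below are
# assumptions.
#
#   soft = tf.nn.softmax(logits)
#   hard = tf.one_hot(tf.argmax(soft, axis=-1), depth=tf.shape(soft)[-1])
#   sample = straight_through(hard, soft)  # forward: hard, gradient: soft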
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
    percentage of tokens from each, since if one sequence is very short then
    each truncated token likely carries more information than a token from
    the longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
|
get_schema
|
Information about a medication that is used to support knowledge.
id: Unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
dosage: Dosage for the medication for the specific guidelines.
indicationCodeableConcept: Indication for use that apply to the specific administration guidelines.
indicationReference: Indication for use that apply to the specific administration guidelines.
patientCharacteristics: Characteristics of the patient that are relevant to the administration
guidelines (for example, height, weight, gender, etc.).
|
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class MedicationKnowledge_AdministrationGuidelinesSchema:
"""
Information about a medication that is used to support knowledge.
"""
# noinspection PyDefaultArgument
# MASKED: get_schema function (lines 14-267)
|
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = None,
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
use_date_for: Optional[List[str]] = None,
parent_path: Optional[str] = "",
) -> Union[StructType, DataType]:
"""
Information about a medication that is used to support knowledge.
id: Unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
dosage: Dosage for the medication for the specific guidelines.
indicationCodeableConcept: Indication for use that apply to the specific administration guidelines.
indicationReference: Indication for use that apply to the specific administration guidelines.
patientCharacteristics: Characteristics of the patient that are relevant to the administration
guidelines (for example, height, weight, gender, etc.).
"""
if extension_fields is None:
extension_fields = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueReference",
"valueCodeableConcept",
"valueAddress",
]
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.complex_types.medicationknowledge_dosage import (
MedicationKnowledge_DosageSchema,
)
from spark_fhir_schemas.r4.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.r4.complex_types.medicationknowledge_patientcharacteristics import (
MedicationKnowledge_PatientCharacteristicsSchema,
)
if (
max_recursion_limit
and nesting_list.count("MedicationKnowledge_AdministrationGuidelines")
>= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + [
"MedicationKnowledge_AdministrationGuidelines"
]
my_parent_path = (
parent_path + ".medicationknowledge_administrationguidelines"
if parent_path
else "medicationknowledge_administrationguidelines"
)
schema = StructType(
[
# Unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the element and that modifies the understanding of the element
# in which it is contained and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer can define an extension, there is a set of requirements that SHALL
# be met as part of the definition of the extension. Applications processing a
# resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Dosage for the medication for the specific guidelines.
StructField(
"dosage",
ArrayType(
MedicationKnowledge_DosageSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Indication for use that apply to the specific administration guidelines.
StructField(
"indicationCodeableConcept",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Indication for use that apply to the specific administration guidelines.
StructField(
"indicationReference",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Characteristics of the patient that are relevant to the administration
# guidelines (for example, height, weight, gender, etc.).
StructField(
"patientCharacteristics",
ArrayType(
MedicationKnowledge_PatientCharacteristicsSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
| 14 | 267 |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class MedicationKnowledge_AdministrationGuidelinesSchema:
"""
Information about a medication that is used to support knowledge.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = None,
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
use_date_for: Optional[List[str]] = None,
parent_path: Optional[str] = "",
) -> Union[StructType, DataType]:
"""
Information about a medication that is used to support knowledge.
id: Unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
dosage: Dosage for the medication for the specific guidelines.
indicationCodeableConcept: Indication for use that apply to the specific administration guidelines.
indicationReference: Indication for use that apply to the specific administration guidelines.
patientCharacteristics: Characteristics of the patient that are relevant to the administration
guidelines (for example, height, weight, gender, etc.).
"""
if extension_fields is None:
extension_fields = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueReference",
"valueCodeableConcept",
"valueAddress",
]
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.complex_types.medicationknowledge_dosage import (
MedicationKnowledge_DosageSchema,
)
from spark_fhir_schemas.r4.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.r4.complex_types.medicationknowledge_patientcharacteristics import (
MedicationKnowledge_PatientCharacteristicsSchema,
)
if (
max_recursion_limit
and nesting_list.count("MedicationKnowledge_AdministrationGuidelines")
>= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + [
"MedicationKnowledge_AdministrationGuidelines"
]
my_parent_path = (
parent_path + ".medicationknowledge_administrationguidelines"
if parent_path
else "medicationknowledge_administrationguidelines"
)
schema = StructType(
[
# Unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the element and that modifies the understanding of the element
# in which it is contained and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer can define an extension, there is a set of requirements that SHALL
# be met as part of the definition of the extension. Applications processing a
# resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Dosage for the medication for the specific guidelines.
StructField(
"dosage",
ArrayType(
MedicationKnowledge_DosageSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Indication for use that apply to the specific administration guidelines.
StructField(
"indicationCodeableConcept",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Indication for use that apply to the specific administration guidelines.
StructField(
"indicationReference",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Characteristics of the patient that are relevant to the administration
# guidelines (for example, height, weight, gender, etc.).
StructField(
"patientCharacteristics",
ArrayType(
MedicationKnowledge_PatientCharacteristicsSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
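# Hypothetical usage sketch (not part of the auto-generated file): build the
# Spark struct for this FHIR complex type, e.g. to parse a JSON column of
# administration-guideline elements. The file name and ``spark`` session below
# are assumptions.
#
#   schema = MedicationKnowledge_AdministrationGuidelinesSchema.get_schema(
#       max_recursion_limit=2, include_extension=False)
#   df = spark.read.json("administration_guidelines.json", schema=schema)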
|
__init__
|
The set of arguments for constructing a FeatureGroup resource.
:param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.
:param pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]] feature_definitions: An Array of Feature Definition
:param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.
:param pulumi.Input[str] description: Description about the FeatureGroup.
:param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.
:param pulumi.Input[str] role_arn: Role Arn
:param pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]] tags: An array of key-value pair to apply to this resource.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['FeatureGroupArgs', 'FeatureGroup']
@pulumi.input_type
class FeatureGroupArgs:
# MASKED: __init__ function (lines 18-52)
@property
@pulumi.getter(name="eventTimeFeatureName")
def event_time_feature_name(self) -> pulumi.Input[str]:
"""
The Event Time Feature Name.
"""
return pulumi.get(self, "event_time_feature_name")
@event_time_feature_name.setter
def event_time_feature_name(self, value: pulumi.Input[str]):
pulumi.set(self, "event_time_feature_name", value)
@property
@pulumi.getter(name="featureDefinitions")
def feature_definitions(self) -> pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]:
"""
An Array of Feature Definition
"""
return pulumi.get(self, "feature_definitions")
@feature_definitions.setter
def feature_definitions(self, value: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]):
pulumi.set(self, "feature_definitions", value)
@property
@pulumi.getter(name="recordIdentifierFeatureName")
def record_identifier_feature_name(self) -> pulumi.Input[str]:
"""
The Record Identifier Feature Name.
"""
return pulumi.get(self, "record_identifier_feature_name")
@record_identifier_feature_name.setter
def record_identifier_feature_name(self, value: pulumi.Input[str]):
pulumi.set(self, "record_identifier_feature_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description about the FeatureGroup.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="featureGroupName")
def feature_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The Name of the FeatureGroup.
"""
return pulumi.get(self, "feature_group_name")
@feature_group_name.setter
def feature_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "feature_group_name", value)
@property
@pulumi.getter(name="offlineStoreConfig")
def offline_store_config(self) -> Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']]:
return pulumi.get(self, "offline_store_config")
@offline_store_config.setter
def offline_store_config(self, value: Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']]):
pulumi.set(self, "offline_store_config", value)
@property
@pulumi.getter(name="onlineStoreConfig")
def online_store_config(self) -> Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']]:
return pulumi.get(self, "online_store_config")
@online_store_config.setter
def online_store_config(self, value: Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']]):
pulumi.set(self, "online_store_config", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[pulumi.Input[str]]:
"""
Role Arn
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]:
"""
An array of key-value pair to apply to this resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]):
pulumi.set(self, "tags", value)
class FeatureGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
event_time_feature_name: Optional[pulumi.Input[str]] = None,
feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input[pulumi.InputType['OfflineStoreConfigPropertiesArgs']]] = None,
online_store_config: Optional[pulumi.Input[pulumi.InputType['OnlineStoreConfigPropertiesArgs']]] = None,
record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]]] = None,
__props__=None):
"""
Resource Type definition for AWS::SageMaker::FeatureGroup
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description about the FeatureGroup.
:param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]] feature_definitions: An Array of Feature Definition
:param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.
:param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.
:param pulumi.Input[str] role_arn: Role Arn
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]] tags: An array of key-value pair to apply to this resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FeatureGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::SageMaker::FeatureGroup
:param str resource_name: The name of the resource.
:param FeatureGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FeatureGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
event_time_feature_name: Optional[pulumi.Input[str]] = None,
feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input[pulumi.InputType['OfflineStoreConfigPropertiesArgs']]] = None,
online_store_config: Optional[pulumi.Input[pulumi.InputType['OnlineStoreConfigPropertiesArgs']]] = None,
record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FeatureGroupArgs.__new__(FeatureGroupArgs)
__props__.__dict__["description"] = description
if event_time_feature_name is None and not opts.urn:
raise TypeError("Missing required property 'event_time_feature_name'")
__props__.__dict__["event_time_feature_name"] = event_time_feature_name
if feature_definitions is None and not opts.urn:
raise TypeError("Missing required property 'feature_definitions'")
__props__.__dict__["feature_definitions"] = feature_definitions
__props__.__dict__["feature_group_name"] = feature_group_name
__props__.__dict__["offline_store_config"] = offline_store_config
__props__.__dict__["online_store_config"] = online_store_config
if record_identifier_feature_name is None and not opts.urn:
raise TypeError("Missing required property 'record_identifier_feature_name'")
__props__.__dict__["record_identifier_feature_name"] = record_identifier_feature_name
__props__.__dict__["role_arn"] = role_arn
__props__.__dict__["tags"] = tags
super(FeatureGroup, __self__).__init__(
'aws-native:sagemaker:FeatureGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'FeatureGroup':
"""
Get an existing FeatureGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = FeatureGroupArgs.__new__(FeatureGroupArgs)
__props__.__dict__["description"] = None
__props__.__dict__["event_time_feature_name"] = None
__props__.__dict__["feature_definitions"] = None
__props__.__dict__["feature_group_name"] = None
__props__.__dict__["offline_store_config"] = None
__props__.__dict__["online_store_config"] = None
__props__.__dict__["record_identifier_feature_name"] = None
__props__.__dict__["role_arn"] = None
__props__.__dict__["tags"] = None
return FeatureGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description about the FeatureGroup.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="eventTimeFeatureName")
def event_time_feature_name(self) -> pulumi.Output[str]:
"""
The Event Time Feature Name.
"""
return pulumi.get(self, "event_time_feature_name")
@property
@pulumi.getter(name="featureDefinitions")
def feature_definitions(self) -> pulumi.Output[Sequence['outputs.FeatureGroupFeatureDefinition']]:
"""
An Array of Feature Definition
"""
return pulumi.get(self, "feature_definitions")
@property
@pulumi.getter(name="featureGroupName")
def feature_group_name(self) -> pulumi.Output[str]:
"""
The Name of the FeatureGroup.
"""
return pulumi.get(self, "feature_group_name")
@property
@pulumi.getter(name="offlineStoreConfig")
def offline_store_config(self) -> pulumi.Output[Optional['outputs.OfflineStoreConfigProperties']]:
return pulumi.get(self, "offline_store_config")
@property
@pulumi.getter(name="onlineStoreConfig")
def online_store_config(self) -> pulumi.Output[Optional['outputs.OnlineStoreConfigProperties']]:
return pulumi.get(self, "online_store_config")
@property
@pulumi.getter(name="recordIdentifierFeatureName")
def record_identifier_feature_name(self) -> pulumi.Output[str]:
"""
The Record Identifier Feature Name.
"""
return pulumi.get(self, "record_identifier_feature_name")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Output[Optional[str]]:
"""
Role Arn
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.FeatureGroupTag']]]:
"""
An array of key-value pair to apply to this resource.
"""
return pulumi.get(self, "tags")
|
def __init__(__self__, *,
event_time_feature_name: pulumi.Input[str],
feature_definitions: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]],
record_identifier_feature_name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']] = None,
online_store_config: Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]] = None):
"""
The set of arguments for constructing a FeatureGroup resource.
:param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.
:param pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]] feature_definitions: An Array of Feature Definition
:param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.
:param pulumi.Input[str] description: Description about the FeatureGroup.
:param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.
:param pulumi.Input[str] role_arn: Role Arn
:param pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]] tags: An array of key-value pair to apply to this resource.
"""
pulumi.set(__self__, "event_time_feature_name", event_time_feature_name)
pulumi.set(__self__, "feature_definitions", feature_definitions)
pulumi.set(__self__, "record_identifier_feature_name", record_identifier_feature_name)
if description is not None:
pulumi.set(__self__, "description", description)
if feature_group_name is not None:
pulumi.set(__self__, "feature_group_name", feature_group_name)
if offline_store_config is not None:
pulumi.set(__self__, "offline_store_config", offline_store_config)
if online_store_config is not None:
pulumi.set(__self__, "online_store_config", online_store_config)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
if tags is not None:
pulumi.set(__self__, "tags", tags)
| 18 | 52 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['FeatureGroupArgs', 'FeatureGroup']
@pulumi.input_type
class FeatureGroupArgs:
def __init__(__self__, *,
event_time_feature_name: pulumi.Input[str],
feature_definitions: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]],
record_identifier_feature_name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']] = None,
online_store_config: Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]] = None):
"""
The set of arguments for constructing a FeatureGroup resource.
:param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.
:param pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]] feature_definitions: An Array of Feature Definition
:param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.
:param pulumi.Input[str] description: Description about the FeatureGroup.
:param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.
:param pulumi.Input[str] role_arn: Role Arn
:param pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]] tags: An array of key-value pair to apply to this resource.
"""
pulumi.set(__self__, "event_time_feature_name", event_time_feature_name)
pulumi.set(__self__, "feature_definitions", feature_definitions)
pulumi.set(__self__, "record_identifier_feature_name", record_identifier_feature_name)
if description is not None:
pulumi.set(__self__, "description", description)
if feature_group_name is not None:
pulumi.set(__self__, "feature_group_name", feature_group_name)
if offline_store_config is not None:
pulumi.set(__self__, "offline_store_config", offline_store_config)
if online_store_config is not None:
pulumi.set(__self__, "online_store_config", online_store_config)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="eventTimeFeatureName")
def event_time_feature_name(self) -> pulumi.Input[str]:
"""
The Event Time Feature Name.
"""
return pulumi.get(self, "event_time_feature_name")
@event_time_feature_name.setter
def event_time_feature_name(self, value: pulumi.Input[str]):
pulumi.set(self, "event_time_feature_name", value)
@property
@pulumi.getter(name="featureDefinitions")
def feature_definitions(self) -> pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]:
"""
An Array of Feature Definition
"""
return pulumi.get(self, "feature_definitions")
@feature_definitions.setter
def feature_definitions(self, value: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]):
pulumi.set(self, "feature_definitions", value)
@property
@pulumi.getter(name="recordIdentifierFeatureName")
def record_identifier_feature_name(self) -> pulumi.Input[str]:
"""
The Record Identifier Feature Name.
"""
return pulumi.get(self, "record_identifier_feature_name")
@record_identifier_feature_name.setter
def record_identifier_feature_name(self, value: pulumi.Input[str]):
pulumi.set(self, "record_identifier_feature_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description about the FeatureGroup.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="featureGroupName")
def feature_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The Name of the FeatureGroup.
"""
return pulumi.get(self, "feature_group_name")
@feature_group_name.setter
def feature_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "feature_group_name", value)
@property
@pulumi.getter(name="offlineStoreConfig")
def offline_store_config(self) -> Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']]:
return pulumi.get(self, "offline_store_config")
@offline_store_config.setter
def offline_store_config(self, value: Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']]):
pulumi.set(self, "offline_store_config", value)
@property
@pulumi.getter(name="onlineStoreConfig")
def online_store_config(self) -> Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']]:
return pulumi.get(self, "online_store_config")
@online_store_config.setter
def online_store_config(self, value: Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']]):
pulumi.set(self, "online_store_config", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[pulumi.Input[str]]:
"""
Role Arn
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]:
"""
An array of key-value pair to apply to this resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]):
pulumi.set(self, "tags", value)
class FeatureGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
event_time_feature_name: Optional[pulumi.Input[str]] = None,
feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input[pulumi.InputType['OfflineStoreConfigPropertiesArgs']]] = None,
online_store_config: Optional[pulumi.Input[pulumi.InputType['OnlineStoreConfigPropertiesArgs']]] = None,
record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]]] = None,
__props__=None):
"""
Resource Type definition for AWS::SageMaker::FeatureGroup
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description about the FeatureGroup.
:param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]] feature_definitions: An Array of Feature Definition
:param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.
:param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.
:param pulumi.Input[str] role_arn: Role Arn
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]] tags: An array of key-value pair to apply to this resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FeatureGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::SageMaker::FeatureGroup
:param str resource_name: The name of the resource.
:param FeatureGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FeatureGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
event_time_feature_name: Optional[pulumi.Input[str]] = None,
feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input[pulumi.InputType['OfflineStoreConfigPropertiesArgs']]] = None,
online_store_config: Optional[pulumi.Input[pulumi.InputType['OnlineStoreConfigPropertiesArgs']]] = None,
record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FeatureGroupArgs.__new__(FeatureGroupArgs)
__props__.__dict__["description"] = description
if event_time_feature_name is None and not opts.urn:
raise TypeError("Missing required property 'event_time_feature_name'")
__props__.__dict__["event_time_feature_name"] = event_time_feature_name
if feature_definitions is None and not opts.urn:
raise TypeError("Missing required property 'feature_definitions'")
__props__.__dict__["feature_definitions"] = feature_definitions
__props__.__dict__["feature_group_name"] = feature_group_name
__props__.__dict__["offline_store_config"] = offline_store_config
__props__.__dict__["online_store_config"] = online_store_config
if record_identifier_feature_name is None and not opts.urn:
raise TypeError("Missing required property 'record_identifier_feature_name'")
__props__.__dict__["record_identifier_feature_name"] = record_identifier_feature_name
__props__.__dict__["role_arn"] = role_arn
__props__.__dict__["tags"] = tags
super(FeatureGroup, __self__).__init__(
'aws-native:sagemaker:FeatureGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'FeatureGroup':
"""
Get an existing FeatureGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = FeatureGroupArgs.__new__(FeatureGroupArgs)
__props__.__dict__["description"] = None
__props__.__dict__["event_time_feature_name"] = None
__props__.__dict__["feature_definitions"] = None
__props__.__dict__["feature_group_name"] = None
__props__.__dict__["offline_store_config"] = None
__props__.__dict__["online_store_config"] = None
__props__.__dict__["record_identifier_feature_name"] = None
__props__.__dict__["role_arn"] = None
__props__.__dict__["tags"] = None
return FeatureGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description about the FeatureGroup.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="eventTimeFeatureName")
def event_time_feature_name(self) -> pulumi.Output[str]:
"""
The Event Time Feature Name.
"""
return pulumi.get(self, "event_time_feature_name")
@property
@pulumi.getter(name="featureDefinitions")
def feature_definitions(self) -> pulumi.Output[Sequence['outputs.FeatureGroupFeatureDefinition']]:
"""
An Array of Feature Definition
"""
return pulumi.get(self, "feature_definitions")
@property
@pulumi.getter(name="featureGroupName")
def feature_group_name(self) -> pulumi.Output[str]:
"""
The Name of the FeatureGroup.
"""
return pulumi.get(self, "feature_group_name")
@property
@pulumi.getter(name="offlineStoreConfig")
def offline_store_config(self) -> pulumi.Output[Optional['outputs.OfflineStoreConfigProperties']]:
return pulumi.get(self, "offline_store_config")
@property
@pulumi.getter(name="onlineStoreConfig")
def online_store_config(self) -> pulumi.Output[Optional['outputs.OnlineStoreConfigProperties']]:
return pulumi.get(self, "online_store_config")
@property
@pulumi.getter(name="recordIdentifierFeatureName")
def record_identifier_feature_name(self) -> pulumi.Output[str]:
"""
The Record Identifier Feature Name.
"""
return pulumi.get(self, "record_identifier_feature_name")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Output[Optional[str]]:
"""
Role Arn
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.FeatureGroupTag']]]:
"""
An array of key-value pair to apply to this resource.
"""
return pulumi.get(self, "tags")
|
get
|
Get an existing FeatureGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['FeatureGroupArgs', 'FeatureGroup']
@pulumi.input_type
class FeatureGroupArgs:
def __init__(__self__, *,
event_time_feature_name: pulumi.Input[str],
feature_definitions: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]],
record_identifier_feature_name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']] = None,
online_store_config: Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]] = None):
"""
The set of arguments for constructing a FeatureGroup resource.
:param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.
:param pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]] feature_definitions: An Array of Feature Definition
:param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.
:param pulumi.Input[str] description: Description about the FeatureGroup.
:param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.
:param pulumi.Input[str] role_arn: Role Arn
:param pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]] tags: An array of key-value pair to apply to this resource.
"""
pulumi.set(__self__, "event_time_feature_name", event_time_feature_name)
pulumi.set(__self__, "feature_definitions", feature_definitions)
pulumi.set(__self__, "record_identifier_feature_name", record_identifier_feature_name)
if description is not None:
pulumi.set(__self__, "description", description)
if feature_group_name is not None:
pulumi.set(__self__, "feature_group_name", feature_group_name)
if offline_store_config is not None:
pulumi.set(__self__, "offline_store_config", offline_store_config)
if online_store_config is not None:
pulumi.set(__self__, "online_store_config", online_store_config)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="eventTimeFeatureName")
def event_time_feature_name(self) -> pulumi.Input[str]:
"""
The Event Time Feature Name.
"""
return pulumi.get(self, "event_time_feature_name")
@event_time_feature_name.setter
def event_time_feature_name(self, value: pulumi.Input[str]):
pulumi.set(self, "event_time_feature_name", value)
@property
@pulumi.getter(name="featureDefinitions")
def feature_definitions(self) -> pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]:
"""
An Array of Feature Definition
"""
return pulumi.get(self, "feature_definitions")
@feature_definitions.setter
def feature_definitions(self, value: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]):
pulumi.set(self, "feature_definitions", value)
@property
@pulumi.getter(name="recordIdentifierFeatureName")
def record_identifier_feature_name(self) -> pulumi.Input[str]:
"""
The Record Identifier Feature Name.
"""
return pulumi.get(self, "record_identifier_feature_name")
@record_identifier_feature_name.setter
def record_identifier_feature_name(self, value: pulumi.Input[str]):
pulumi.set(self, "record_identifier_feature_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description about the FeatureGroup.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="featureGroupName")
def feature_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The Name of the FeatureGroup.
"""
return pulumi.get(self, "feature_group_name")
@feature_group_name.setter
def feature_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "feature_group_name", value)
@property
@pulumi.getter(name="offlineStoreConfig")
def offline_store_config(self) -> Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']]:
return pulumi.get(self, "offline_store_config")
@offline_store_config.setter
def offline_store_config(self, value: Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']]):
pulumi.set(self, "offline_store_config", value)
@property
@pulumi.getter(name="onlineStoreConfig")
def online_store_config(self) -> Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']]:
return pulumi.get(self, "online_store_config")
@online_store_config.setter
def online_store_config(self, value: Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']]):
pulumi.set(self, "online_store_config", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[pulumi.Input[str]]:
"""
Role Arn
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]:
"""
An array of key-value pair to apply to this resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]):
pulumi.set(self, "tags", value)
class FeatureGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
event_time_feature_name: Optional[pulumi.Input[str]] = None,
feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input[pulumi.InputType['OfflineStoreConfigPropertiesArgs']]] = None,
online_store_config: Optional[pulumi.Input[pulumi.InputType['OnlineStoreConfigPropertiesArgs']]] = None,
record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]]] = None,
__props__=None):
"""
Resource Type definition for AWS::SageMaker::FeatureGroup
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description about the FeatureGroup.
:param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]] feature_definitions: An Array of Feature Definition
:param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.
:param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.
:param pulumi.Input[str] role_arn: Role Arn
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]] tags: An array of key-value pair to apply to this resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FeatureGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::SageMaker::FeatureGroup
:param str resource_name: The name of the resource.
:param FeatureGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FeatureGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
event_time_feature_name: Optional[pulumi.Input[str]] = None,
feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input[pulumi.InputType['OfflineStoreConfigPropertiesArgs']]] = None,
online_store_config: Optional[pulumi.Input[pulumi.InputType['OnlineStoreConfigPropertiesArgs']]] = None,
record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FeatureGroupArgs.__new__(FeatureGroupArgs)
__props__.__dict__["description"] = description
if event_time_feature_name is None and not opts.urn:
raise TypeError("Missing required property 'event_time_feature_name'")
__props__.__dict__["event_time_feature_name"] = event_time_feature_name
if feature_definitions is None and not opts.urn:
raise TypeError("Missing required property 'feature_definitions'")
__props__.__dict__["feature_definitions"] = feature_definitions
__props__.__dict__["feature_group_name"] = feature_group_name
__props__.__dict__["offline_store_config"] = offline_store_config
__props__.__dict__["online_store_config"] = online_store_config
if record_identifier_feature_name is None and not opts.urn:
raise TypeError("Missing required property 'record_identifier_feature_name'")
__props__.__dict__["record_identifier_feature_name"] = record_identifier_feature_name
__props__.__dict__["role_arn"] = role_arn
__props__.__dict__["tags"] = tags
super(FeatureGroup, __self__).__init__(
'aws-native:sagemaker:FeatureGroup',
resource_name,
__props__,
opts)
# MASKED: get function (lines 251-276)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description about the FeatureGroup.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="eventTimeFeatureName")
def event_time_feature_name(self) -> pulumi.Output[str]:
"""
The Event Time Feature Name.
"""
return pulumi.get(self, "event_time_feature_name")
@property
@pulumi.getter(name="featureDefinitions")
def feature_definitions(self) -> pulumi.Output[Sequence['outputs.FeatureGroupFeatureDefinition']]:
"""
An Array of Feature Definition
"""
return pulumi.get(self, "feature_definitions")
@property
@pulumi.getter(name="featureGroupName")
def feature_group_name(self) -> pulumi.Output[str]:
"""
The Name of the FeatureGroup.
"""
return pulumi.get(self, "feature_group_name")
@property
@pulumi.getter(name="offlineStoreConfig")
def offline_store_config(self) -> pulumi.Output[Optional['outputs.OfflineStoreConfigProperties']]:
return pulumi.get(self, "offline_store_config")
@property
@pulumi.getter(name="onlineStoreConfig")
def online_store_config(self) -> pulumi.Output[Optional['outputs.OnlineStoreConfigProperties']]:
return pulumi.get(self, "online_store_config")
@property
@pulumi.getter(name="recordIdentifierFeatureName")
def record_identifier_feature_name(self) -> pulumi.Output[str]:
"""
The Record Identifier Feature Name.
"""
return pulumi.get(self, "record_identifier_feature_name")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Output[Optional[str]]:
"""
Role Arn
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.FeatureGroupTag']]]:
"""
An array of key-value pair to apply to this resource.
"""
return pulumi.get(self, "tags")
|
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'FeatureGroup':
"""
Get an existing FeatureGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = FeatureGroupArgs.__new__(FeatureGroupArgs)
__props__.__dict__["description"] = None
__props__.__dict__["event_time_feature_name"] = None
__props__.__dict__["feature_definitions"] = None
__props__.__dict__["feature_group_name"] = None
__props__.__dict__["offline_store_config"] = None
__props__.__dict__["online_store_config"] = None
__props__.__dict__["record_identifier_feature_name"] = None
__props__.__dict__["role_arn"] = None
__props__.__dict__["tags"] = None
return FeatureGroup(resource_name, opts=opts, __props__=__props__)
| 251 | 276 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['FeatureGroupArgs', 'FeatureGroup']
@pulumi.input_type
class FeatureGroupArgs:
def __init__(__self__, *,
event_time_feature_name: pulumi.Input[str],
feature_definitions: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]],
record_identifier_feature_name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']] = None,
online_store_config: Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]] = None):
"""
The set of arguments for constructing a FeatureGroup resource.
:param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.
:param pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]] feature_definitions: An Array of Feature Definition
:param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.
:param pulumi.Input[str] description: Description about the FeatureGroup.
:param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.
:param pulumi.Input[str] role_arn: Role Arn
:param pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]] tags: An array of key-value pair to apply to this resource.
"""
pulumi.set(__self__, "event_time_feature_name", event_time_feature_name)
pulumi.set(__self__, "feature_definitions", feature_definitions)
pulumi.set(__self__, "record_identifier_feature_name", record_identifier_feature_name)
if description is not None:
pulumi.set(__self__, "description", description)
if feature_group_name is not None:
pulumi.set(__self__, "feature_group_name", feature_group_name)
if offline_store_config is not None:
pulumi.set(__self__, "offline_store_config", offline_store_config)
if online_store_config is not None:
pulumi.set(__self__, "online_store_config", online_store_config)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="eventTimeFeatureName")
def event_time_feature_name(self) -> pulumi.Input[str]:
"""
The Event Time Feature Name.
"""
return pulumi.get(self, "event_time_feature_name")
@event_time_feature_name.setter
def event_time_feature_name(self, value: pulumi.Input[str]):
pulumi.set(self, "event_time_feature_name", value)
@property
@pulumi.getter(name="featureDefinitions")
def feature_definitions(self) -> pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]:
"""
An Array of Feature Definition
"""
return pulumi.get(self, "feature_definitions")
@feature_definitions.setter
def feature_definitions(self, value: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]):
pulumi.set(self, "feature_definitions", value)
@property
@pulumi.getter(name="recordIdentifierFeatureName")
def record_identifier_feature_name(self) -> pulumi.Input[str]:
"""
The Record Identifier Feature Name.
"""
return pulumi.get(self, "record_identifier_feature_name")
@record_identifier_feature_name.setter
def record_identifier_feature_name(self, value: pulumi.Input[str]):
pulumi.set(self, "record_identifier_feature_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description about the FeatureGroup.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="featureGroupName")
def feature_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The Name of the FeatureGroup.
"""
return pulumi.get(self, "feature_group_name")
@feature_group_name.setter
def feature_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "feature_group_name", value)
@property
@pulumi.getter(name="offlineStoreConfig")
def offline_store_config(self) -> Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']]:
return pulumi.get(self, "offline_store_config")
@offline_store_config.setter
def offline_store_config(self, value: Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']]):
pulumi.set(self, "offline_store_config", value)
@property
@pulumi.getter(name="onlineStoreConfig")
def online_store_config(self) -> Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']]:
return pulumi.get(self, "online_store_config")
@online_store_config.setter
def online_store_config(self, value: Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']]):
pulumi.set(self, "online_store_config", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[pulumi.Input[str]]:
"""
Role Arn
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]:
"""
An array of key-value pair to apply to this resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]):
pulumi.set(self, "tags", value)
class FeatureGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
event_time_feature_name: Optional[pulumi.Input[str]] = None,
feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input[pulumi.InputType['OfflineStoreConfigPropertiesArgs']]] = None,
online_store_config: Optional[pulumi.Input[pulumi.InputType['OnlineStoreConfigPropertiesArgs']]] = None,
record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]]] = None,
__props__=None):
"""
Resource Type definition for AWS::SageMaker::FeatureGroup
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description about the FeatureGroup.
:param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]] feature_definitions: An Array of Feature Definition
:param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.
:param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.
:param pulumi.Input[str] role_arn: Role Arn
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]] tags: An array of key-value pair to apply to this resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FeatureGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::SageMaker::FeatureGroup
:param str resource_name: The name of the resource.
:param FeatureGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FeatureGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
event_time_feature_name: Optional[pulumi.Input[str]] = None,
feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input[pulumi.InputType['OfflineStoreConfigPropertiesArgs']]] = None,
online_store_config: Optional[pulumi.Input[pulumi.InputType['OnlineStoreConfigPropertiesArgs']]] = None,
record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FeatureGroupArgs.__new__(FeatureGroupArgs)
__props__.__dict__["description"] = description
if event_time_feature_name is None and not opts.urn:
raise TypeError("Missing required property 'event_time_feature_name'")
__props__.__dict__["event_time_feature_name"] = event_time_feature_name
if feature_definitions is None and not opts.urn:
raise TypeError("Missing required property 'feature_definitions'")
__props__.__dict__["feature_definitions"] = feature_definitions
__props__.__dict__["feature_group_name"] = feature_group_name
__props__.__dict__["offline_store_config"] = offline_store_config
__props__.__dict__["online_store_config"] = online_store_config
if record_identifier_feature_name is None and not opts.urn:
raise TypeError("Missing required property 'record_identifier_feature_name'")
__props__.__dict__["record_identifier_feature_name"] = record_identifier_feature_name
__props__.__dict__["role_arn"] = role_arn
__props__.__dict__["tags"] = tags
super(FeatureGroup, __self__).__init__(
'aws-native:sagemaker:FeatureGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'FeatureGroup':
"""
Get an existing FeatureGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = FeatureGroupArgs.__new__(FeatureGroupArgs)
__props__.__dict__["description"] = None
__props__.__dict__["event_time_feature_name"] = None
__props__.__dict__["feature_definitions"] = None
__props__.__dict__["feature_group_name"] = None
__props__.__dict__["offline_store_config"] = None
__props__.__dict__["online_store_config"] = None
__props__.__dict__["record_identifier_feature_name"] = None
__props__.__dict__["role_arn"] = None
__props__.__dict__["tags"] = None
return FeatureGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description about the FeatureGroup.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="eventTimeFeatureName")
def event_time_feature_name(self) -> pulumi.Output[str]:
"""
The Event Time Feature Name.
"""
return pulumi.get(self, "event_time_feature_name")
@property
@pulumi.getter(name="featureDefinitions")
def feature_definitions(self) -> pulumi.Output[Sequence['outputs.FeatureGroupFeatureDefinition']]:
"""
An Array of Feature Definition
"""
return pulumi.get(self, "feature_definitions")
@property
@pulumi.getter(name="featureGroupName")
def feature_group_name(self) -> pulumi.Output[str]:
"""
The Name of the FeatureGroup.
"""
return pulumi.get(self, "feature_group_name")
@property
@pulumi.getter(name="offlineStoreConfig")
def offline_store_config(self) -> pulumi.Output[Optional['outputs.OfflineStoreConfigProperties']]:
return pulumi.get(self, "offline_store_config")
@property
@pulumi.getter(name="onlineStoreConfig")
def online_store_config(self) -> pulumi.Output[Optional['outputs.OnlineStoreConfigProperties']]:
return pulumi.get(self, "online_store_config")
@property
@pulumi.getter(name="recordIdentifierFeatureName")
def record_identifier_feature_name(self) -> pulumi.Output[str]:
"""
The Record Identifier Feature Name.
"""
return pulumi.get(self, "record_identifier_feature_name")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Output[Optional[str]]:
"""
Role Arn
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.FeatureGroupTag']]]:
"""
An array of key-value pair to apply to this resource.
"""
return pulumi.get(self, "tags")
|
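A minimal usage sketch for the generated FeatureGroup class above. It assumes the published pulumi_aws_native Python package and a configured AWS Native provider; the role ARN, the feature names, and the feature_name/feature_type argument names of FeatureGroupFeatureDefinitionArgs are illustrative assumptions, not values taken from this file.

import pulumi
import pulumi_aws_native as aws_native

fg = aws_native.sagemaker.FeatureGroup(
    "demo-feature-group",
    record_identifier_feature_name="customer_id",   # hypothetical feature names
    event_time_feature_name="event_time",
    feature_definitions=[
        # feature_name / feature_type are assumed to mirror the CloudFormation
        # FeatureDefinition schema; check _inputs.py for the exact field names.
        aws_native.sagemaker.FeatureGroupFeatureDefinitionArgs(
            feature_name="customer_id", feature_type="String"),
        aws_native.sagemaker.FeatureGroupFeatureDefinitionArgs(
            feature_name="event_time", feature_type="String"),
    ],
    role_arn="arn:aws:iam::123456789012:role/sagemaker-feature-store",  # hypothetical ARN
)

# The static get() shown above looks up an existing resource by its provider ID.
existing = aws_native.sagemaker.FeatureGroup.get("existing-fg", id=fg.id)
pulumi.export("feature_group_name", fg.feature_group_name)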
_parse_dsn
|
Method parses dsn
Args:
dsn (str): dsn
Returns:
bool: True
Raises:
exception: Exception
|
# -*- coding: utf-8 -*-
"""DBO MSSQL driver
.. module:: lib.database.dbo.drivers.mssql.driver
:platform: Unix
:synopsis: DBO MSSQL driver
.. moduleauthor:: Petr Rašek <[email protected]>
"""
try:
import pymssql
except ImportError:
raise NotImplementedError('MSSQL client is not supported for PyPy')
from hydratk.lib.database.dbo import dbodriver
class DBODriver(dbodriver.DBODriver):
"""Class DBODriver
"""
_host = None
_port = 1433
_dbname = None
_driver_options = {
'timeout': 5.0,
'detect_types': 0,
# available “DEFERRED”, “IMMEDIATE” or “EXCLUSIVE”
'isolation_level': None,
'check_same_thread': None,
'factory': 'Connection',
'cached_statements': 100
}
# MASKED: _parse_dsn function (lines 36-66)
def _apply_driver_options(self, driver_options):
"""Method sets driver options
Args:
driver_option (dict): driver options
Returns:
void
"""
for optname, optval in driver_options.items():
if optname in self._driver_options:
self._driver_options[optname] = optval
def connect(self):
"""Method connects to database
Args:
none
Returns:
void
"""
self._dbcon = pymssql.connect(
server=self._host, port=self._port, database=self._dbname, user=self._username, password=self._password)
self.result_as_dict(self._result_as_dict)
def close(self):
"""Method disconnects from database
Args:
none
Returns:
void
Raises:
exception: DBODriverException
"""
if type(self._dbcon).__name__.lower() == 'connection':
self._dbcon.close()
else:
raise dbodriver.DBODriverException('Not connected')
def commit(self):
"""Method commits transaction
Args:
none
Returns:
void
Raises:
exception: DBODriverException
"""
if type(self._dbcon).__name__.lower() == 'connection':
self._dbcon.commit()
else:
raise dbodriver.DBODriverException('Not connected')
def error_code(self):
pass
def error_info(self):
pass
def qexec(self):
pass
def get_attribute(self):
pass
def in_transaction(self):
pass
def last_insert_id(self):
pass
def prepare(self):
pass
def query(self):
pass
def execute(self, sql, *parameters):
"""Method executes query
Args:
sql (str): SQL query
parameters (args): query parameters
Returns:
obj: cursor
"""
self._cursor.execute(sql, tuple(parameters))
return self._cursor
def quote(self):
pass
def rollback(self):
"""Method rollbacks transaction
Args:
none
Returns:
void
Raises:
exception: DBODriverException
"""
if type(self._dbcon).__name__.lower() == 'connection':
self._dbcon.rollback()
else:
raise dbodriver.DBODriverException('Not connected')
def set_attribute(self):
pass
def __getitem__(self, name):
"""Method gets item
Args:
name (str): item name
Returns:
obj: item value
"""
if hasattr(pymssql, name):
return getattr(pymssql, name)
def __getattr__(self, name):
"""Method gets attribute
Args:
name (str): attribute name
Returns:
obj: attribute value
"""
if type(self._dbcon).__name__.lower() == 'connection':
if hasattr(self._dbcon, name):
return getattr(self._dbcon, name)
if hasattr(pymssql, name):
return getattr(pymssql, name)
def table_exists(self, table_name):
"""Method checks if table exists
Args:
table_name (str): table
Returns:
bool: result
"""
if table_name is not None and table_name != '':
query = "SELECT count(*) found FROM information_schema.tables WHERE table_catalog=%s AND table_type='BASE TABLE' and table_name=%s"
self._cursor.execute(query, (self._dbname, table_name))
recs = self._cursor.fetchall()
result = True if (recs[0]['found'] == 1) else False
return result
def database_exists(self):
pass
def remove_database(self):
pass
def erase_database(self):
pass
def result_as_dict(self, state):
"""Method enables query result in dictionary form
Args:
state (bool): enable dictionary
Returns:
void
Raises:
error: TypeError
"""
if state in (True, False):
self._result_as_dict = state
if state == True:
self._cursor = self._dbcon.cursor(as_dict=True)
else:
self._cursor = self._dbcon.cursor()
else:
raise TypeError('Boolean value expected')
|
def _parse_dsn(self, dsn):
"""Method parses dsn
Args:
dsn (str): dsn
Returns:
bool: True
Raises:
exception: Exception
"""
dsn_opt = dsn.split(':')[1]
dsn_opt_tokens = dsn_opt.split(';')
for dsn_opt_token in dsn_opt_tokens:
# print(dsn_opt_token)
opt = dsn_opt_token.split('=')
if opt[0] == 'host':
self._host = opt[1]
if opt[0] == 'port':
self._port = int(opt[1])
if opt[0] == 'database':
self._dbname = opt[1]
if opt[0] == 'user':
self._username = opt[1]
if opt[0] == 'password':
self._password = opt[1]
return True
| 36 | 66 |
# -*- coding: utf-8 -*-
"""DBO MSSQL driver
.. module:: lib.database.dbo.drivers.mssql.driver
:platform: Unix
:synopsis: DBO MSSQL driver
.. moduleauthor:: Petr Rašek <[email protected]>
"""
try:
import pymssql
except ImportError:
raise NotImplementedError('MSSQL client is not supported for PyPy')
from hydratk.lib.database.dbo import dbodriver
class DBODriver(dbodriver.DBODriver):
"""Class DBODriver
"""
_host = None
_port = 1433
_dbname = None
_driver_options = {
'timeout': 5.0,
'detect_types': 0,
# available “DEFERRED”, “IMMEDIATE” or “EXCLUSIVE”
'isolation_level': None,
'check_same_thread': None,
'factory': 'Connection',
'cached_statements': 100
}
def _parse_dsn(self, dsn):
"""Method parses dsn
Args:
dsn (str): dsn
Returns:
bool: True
Raises:
exception: Exception
"""
dsn_opt = dsn.split(':')[1]
dsn_opt_tokens = dsn_opt.split(';')
for dsn_opt_token in dsn_opt_tokens:
# print(dsn_opt_token)
opt = dsn_opt_token.split('=')
if opt[0] == 'host':
self._host = opt[1]
if opt[0] == 'port':
self._port = int(opt[1])
if opt[0] == 'database':
self._dbname = opt[1]
if opt[0] == 'user':
self._username = opt[1]
if opt[0] == 'password':
self._password = opt[1]
return True
def _apply_driver_options(self, driver_options):
"""Method sets driver options
Args:
driver_option (dict): driver options
Returns:
void
"""
for optname, optval in driver_options.items():
if optname in self._driver_options:
self._driver_options[optname] = optval
def connect(self):
"""Method connects to database
Args:
none
Returns:
void
"""
self._dbcon = pymssql.connect(
server=self._host, port=self._port, database=self._dbname, user=self._username, password=self._password)
self.result_as_dict(self._result_as_dict)
def close(self):
"""Method disconnects from database
Args:
none
Returns:
void
Raises:
exception: DBODriverException
"""
if type(self._dbcon).__name__.lower() == 'connection':
self._dbcon.close()
else:
raise dbodriver.DBODriverException('Not connected')
def commit(self):
"""Method commits transaction
Args:
none
Returns:
void
Raises:
exception: DBODriverException
"""
if type(self._dbcon).__name__.lower() == 'connection':
self._dbcon.commit()
else:
raise dbodriver.DBODriverException('Not connected')
def error_code(self):
pass
def error_info(self):
pass
def qexec(self):
pass
def get_attribute(self):
pass
def in_transaction(self):
pass
def last_insert_id(self):
pass
def prepare(self):
pass
def query(self):
pass
def execute(self, sql, *parameters):
"""Method executes query
Args:
sql (str): SQL query
parameters (args): query parameters
Returns:
obj: cursor
"""
self._cursor.execute(sql, tuple(parameters))
return self._cursor
def quote(self):
pass
def rollback(self):
"""Method rollbacks transaction
Args:
none
Returns:
void
Raises:
exception: DBODriverException
"""
if type(self._dbcon).__name__.lower() == 'connection':
self._dbcon.rollback()
else:
raise dbodriver.DBODriverException('Not connected')
def set_attribute(self):
pass
def __getitem__(self, name):
"""Method gets item
Args:
name (str): item name
Returns:
obj: item value
"""
if hasattr(pymssql, name):
return getattr(pymssql, name)
def __getattr__(self, name):
"""Method gets attribute
Args:
name (str): attribute name
Returns:
obj: attribute value
"""
if type(self._dbcon).__name__.lower() == 'connection':
if hasattr(self._dbcon, name):
return getattr(self._dbcon, name)
if hasattr(pymssql, name):
return getattr(pymssql, name)
def table_exists(self, table_name):
"""Method checks if table exists
Args:
table_name (str): table
Returns:
bool: result
"""
if table_name is not None and table_name != '':
query = "SELECT count(*) found FROM information_schema.tables WHERE table_catalog=%s AND table_type='BASE TABLE' and table_name=%s"
self._cursor.execute(query, (self._dbname, table_name))
recs = self._cursor.fetchall()
result = True if (recs[0]['found'] == 1) else False
return result
def database_exists(self):
pass
def remove_database(self):
pass
def erase_database(self):
pass
def result_as_dict(self, state):
"""Method enables query result in dictionary form
Args:
state (bool): enable dictionary
Returns:
void
Raises:
error: TypeError
"""
if state in (True, False):
self._result_as_dict = state
if state == True:
self._cursor = self._dbcon.cursor(as_dict=True)
else:
self._cursor = self._dbcon.cursor()
else:
raise TypeError('Boolean value expected')
|
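For orientation, a short illustration of what the _parse_dsn implementation above extracts; the DSN string below is a hypothetical example, not one taken from the library's documentation.

# dsn = 'mssql:host=localhost;port=1433;database=testdb;user=sa;password=secret'
# _parse_dsn(dsn) splits everything after the driver prefix on ';' and '=' and sets:
#   self._host     -> 'localhost'
#   self._port     -> 1433   (cast to int)
#   self._dbname   -> 'testdb'
#   self._username -> 'sa'
#   self._password -> 'secret'
# connect() then passes these attributes straight to pymssql.connect().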
calc_is_correct
|
gt_bboxes: (N, 4) np.array in xywh format
pred_bboxes: (N, 5) np.array in conf+xywh format
|
import sys
import cv2
import os
from ast import literal_eval
from pathlib import Path
import shutil
import logging
import random
import pickle
import yaml
import subprocess
from PIL import Image
from glob import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import animation, rc
plt.rcParams['figure.figsize'] = 30, 30
np.set_printoptions(precision=3, suppress=True)
rc('animation', html='jshtml')
import torch
from augmentations import get_albu_transforms
IMAGE_DIR = '~/Kaggle/data/tensorflow-great-barrier-reef/train_images'
def load_image(video_id, video_frame, image_dir):
img_path = f'{image_dir}/video_{video_id}/{video_frame}.jpg'
assert os.path.exists(img_path), f'{img_path} does not exist.'
img = cv2.imread(img_path)
return img
def decode_annotations(annotations_str):
    """decode annotations in string to list of dict"""
    return literal_eval(annotations_str)
def load_image_with_annotations(video_id, video_frame, image_dir, annotations_str):
    img = load_image(video_id, video_frame, image_dir)
    annotations = decode_annotations(annotations_str)
if len(annotations) > 0:
for ann in annotations:
cv2.rectangle(img, (ann['x'], ann['y']),
(ann['x'] + ann['width'], ann['y'] + ann['height']),
(255, 0, 0), thickness=2,)
return img
def draw_predictions(img, pred_bboxes):
img = img.copy()
if len(pred_bboxes) > 0:
for bbox in pred_bboxes:
conf = bbox[0]
x, y, w, h = bbox[1:].round().astype(int)
cv2.rectangle(img, (x, y),(x+w, y+h),(0, 255, 255), thickness=2,)
cv2.putText(img, f"{conf:.2}",(x, max(0, y-5)),
cv2.FONT_HERSHEY_SIMPLEX,0.5,(0, 0, 255),
thickness=1,
)
return img
def plot_img(df, idx, image_dir, pred_bboxes=None):
row = df.iloc[idx]
video_id = row.video_id
video_frame = row.video_frame
annotations_str = row.annotations
img = load_image_with_annotations(video_id, video_frame, image_dir, annotations_str)
    if pred_bboxes is not None and len(pred_bboxes) > 0:  # an ndarray has no unambiguous truth value, so test explicitly
pred_bboxes = pred_bboxes[pred_bboxes[:,0].argsort()[::-1]] # sort by conf
img = draw_predictions(img, pred_bboxes)
plt.imshow(img[:, :, ::-1])
def calc_iou(bboxes1, bboxes2, bbox_mode='xywh'):
assert len(bboxes1.shape) == 2 and bboxes1.shape[1] == 4
assert len(bboxes2.shape) == 2 and bboxes2.shape[1] == 4
bboxes1 = bboxes1.copy()
bboxes2 = bboxes2.copy()
if bbox_mode == 'xywh':
bboxes1[:, 2:] += bboxes1[:, :2]
bboxes2[:, 2:] += bboxes2[:, :2]
x11, y11, x12, y12 = np.split(bboxes1, 4, axis=1)
x21, y21, x22, y22 = np.split(bboxes2, 4, axis=1)
xA = np.maximum(x11, np.transpose(x21))
yA = np.maximum(y11, np.transpose(y21))
xB = np.minimum(x12, np.transpose(x22))
yB = np.minimum(y12, np.transpose(y22))
interArea = np.maximum((xB - xA + 1e-9), 0) * np.maximum((yB - yA + 1e-9), 0)
boxAArea = (x12 - x11 + 1e-9) * (y12 - y11 + 1e-9)
boxBArea = (x22 - x21 + 1e-9) * (y22 - y21 + 1e-9)
iou = interArea / (boxAArea + np.transpose(boxBArea) - interArea)
return iou
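# Worked example (xywh mode): a 10x10 box at the origin vs. a 10x10 box shifted by
# (5, 5) overlaps it in a 5x5 region, so IoU = 25 / (100 + 100 - 25) ~ 0.14:
#   calc_iou(np.array([[0, 0, 10, 10]]), np.array([[5, 5, 10, 10]]))   # ~0.14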
def f_beta(tp, fp, fn, beta=2):
if tp == 0:
return 0
return (1+beta**2)*tp / ((1+beta**2)*tp + beta**2*fn+fp)
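# Worked example: tp=8, fp=2, fn=4 with beta=2 gives
#   F2 = (1+4)*8 / ((1+4)*8 + 4*4 + 2) = 40 / 58 ~ 0.69,
# i.e. recall is weighted four times as heavily as precision.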
def calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes, iou_th, verbose=False):
gt_bboxes = gt_bboxes.copy()
pred_bboxes = pred_bboxes.copy()
tp = 0
fp = 0
for k, pred_bbox in enumerate(pred_bboxes): # fixed in ver.7
if len(gt_bboxes) == 0:
fp += len(pred_bboxes) - k # fix in ver.7
break
ious = calc_iou(gt_bboxes, pred_bbox[None, 1:])
max_iou = ious.max()
if max_iou >= iou_th:
tp += 1
gt_bboxes = np.delete(gt_bboxes, ious.argmax(), axis=0)
else:
fp += 1
fn = len(gt_bboxes)
return tp, fp, fn
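# Note on calc_is_correct_at_iou_th above: predictions are matched greedily in the
# order given; each one claims the remaining ground-truth box with the highest IoU
# (if it reaches iou_th), matched GT boxes are removed, unmatched predictions count
# as FP, and any GT boxes left over at the end count as FN.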
# MASKED: calc_is_correct function (lines 125-149)
def calc_f2_score(gt_bboxes_list, pred_bboxes_list, verbose=False):
"""
gt_bboxes_list: list of (N, 4) np.array in xywh format
pred_bboxes_list: list of (N, 5) np.array in conf+xywh format
"""
#f2s = []
f2_dict = {'f2':0, "P":0, "R": 0}
all_tps = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
all_fps = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
all_fns = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
for k, iou_th in enumerate(np.arange(0.3, 0.85, 0.05)):
tps, fps, fns = 0, 0, 0
for i, (gt_bboxes, pred_bboxes) in enumerate(zip(gt_bboxes_list, pred_bboxes_list)):
tp, fp, fn = calc_is_correct(gt_bboxes, pred_bboxes, iou_th)
tps += tp
fps += fp
fns += fn
all_tps[i][k] = tp
all_fps[i][k] = fp
all_fns[i][k] = fn
if verbose:
num_gt = len(gt_bboxes)
num_pred = len(pred_bboxes)
print(f'num_gt:{num_gt:<3} num_pred:{num_pred:<3} tp:{tp:<3} fp:{fp:<3} fn:{fn:<3}')
f2 = f_beta(tps, fps, fns, beta=2)
precision = f_beta(tps, fps, fns, beta=0)
recall = f_beta(tps, fps, fns, beta=100)
f2_dict["f2_" + str(round(iou_th,3))] = f2
f2_dict["P_" + str(round(iou_th,3))] = precision
f2_dict["R_" + str(round(iou_th,3))] = recall
f2_dict['f2'] += f2 / 11
f2_dict['P'] += precision / 11
f2_dict['R'] += recall / 11
f2_dict["tps"] = all_tps
f2_dict["fps"] = all_fps
f2_dict["fns"] = all_fns
return f2_dict
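# A minimal usage sketch with hypothetical values: one image, one GT box and one
# well-overlapping prediction.
#   gt_list   = [np.array([[10, 10, 50, 50]])]        # xywh
#   pred_list = [np.array([[0.9, 12, 11, 50, 52]])]   # conf + xywh
#   scores = calc_f2_score(gt_list, pred_list)
# scores['f2'], scores['P'] and scores['R'] are averaged over the 11 IoU thresholds
# 0.3, 0.35, ..., 0.8; per-threshold values live under keys such as 'f2_0.3'.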
def print_f2_dict(d):
print("Overall f2: {:.3f}, precision {:.3f}, recall {:.3f}".format(d['f2'], d['precision'], d['recall']))
for k, iou_th in enumerate(np.arange(0.3, 0.85, 0.05)):
print(f"IOU {iou_th:.2f}:", end=" ")
print("f2: {:.3f}, precision {:.3f}, recall {:.3f}".format(d["f2_" + str(round(iou_th,3))],
d["precision_" + str(round(iou_th,3))],
d["recall_" + str(round(iou_th,3))]))
def get_path(row, params, infer=False):
row['old_image_path'] = params['root_dir'] / f'train_images/video_{row.video_id}/{row.video_frame}.jpg'
if infer:
row['image_path'] = row["old_image_path"]
else:
row['image_path'] = params['image_dir'] / f'video_{row.video_id}_{row.video_frame}.jpg'
row['label_path'] = params['label_dir'] / f'video_{row.video_id}_{row.video_frame}.txt'
return row
def make_copy(path, params):
# TODO: fix split issue
data = str(path).split('/')
filename = data[-1]
video_id = data[-2]
new_path = params["image_dir"] / f'{video_id}_{filename}'
shutil.copy(path, new_path)
return
# https://www.kaggle.com/awsaf49/great-barrier-reef-yolov5-train
def voc2yolo(image_height, image_width, bboxes):
"""
    voc => [x1, y1, x2, y2]
yolo => [xmid, ymid, w, h] (normalized)
"""
    bboxes = bboxes.copy().astype(float)  # otherwise all values would be 0 because the voc_pascal dtype is np.int
bboxes[..., [0, 2]] = bboxes[..., [0, 2]]/ image_width
bboxes[..., [1, 3]] = bboxes[..., [1, 3]]/ image_height
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
bboxes[..., 0] = bboxes[..., 0] + w/2
bboxes[..., 1] = bboxes[..., 1] + h/2
bboxes[..., 2] = w
bboxes[..., 3] = h
return bboxes
def yolo2voc(image_height, image_width, bboxes):
"""
yolo => [xmid, ymid, w, h] (normalized)
    voc => [x1, y1, x2, y2]
"""
    bboxes = bboxes.copy().astype(float)  # otherwise all values would be 0 because the voc_pascal dtype is np.int
bboxes[..., [0, 2]] = bboxes[..., [0, 2]]* image_width
bboxes[..., [1, 3]] = bboxes[..., [1, 3]]* image_height
bboxes[..., [0, 1]] = bboxes[..., [0, 1]] - bboxes[..., [2, 3]]/2
bboxes[..., [2, 3]] = bboxes[..., [0, 1]] + bboxes[..., [2, 3]]
return bboxes
def coco2yolo(image_height, image_width, bboxes):
"""
coco => [xmin, ymin, w, h]
yolo => [xmid, ymid, w, h] (normalized)
"""
    bboxes = bboxes.copy().astype(float)  # otherwise all values would be 0 because the voc_pascal dtype is np.int
    # normalizing
    bboxes[..., [0, 2]] = bboxes[..., [0, 2]] / image_width
    bboxes[..., [1, 3]] = bboxes[..., [1, 3]] / image_height
    # conversion (xmin, ymin) => (xmid, ymid)
bboxes[..., [0, 1]] = bboxes[..., [0, 1]] + bboxes[..., [2, 3]]/2
return bboxes
def yolo2coco(image_height, image_width, bboxes):
"""
yolo => [xmid, ymid, w, h] (normalized)
coco => [xmin, ymin, w, h]
"""
    bboxes = bboxes.copy().astype(float)  # otherwise all values would be 0 because the voc_pascal dtype is np.int
    # denormalizing
    bboxes[..., [0, 2]] = bboxes[..., [0, 2]] * image_width
    bboxes[..., [1, 3]] = bboxes[..., [1, 3]] * image_height
    # conversion (xmid, ymid) => (xmin, ymin)
bboxes[..., [0, 1]] = bboxes[..., [0, 1]] - bboxes[..., [2, 3]]/2
return bboxes
def voc2coco(bboxes, image_height=720, image_width=1280):
bboxes = voc2yolo(image_height, image_width, bboxes)
bboxes = yolo2coco(image_height, image_width, bboxes)
return bboxes
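# Worked example on a 1280x720 frame: the VOC box [100, 200, 300, 400] (x1, y1, x2, y2)
# comes out of the voc -> yolo -> coco round trip above as the COCO box
# [100, 200, 200, 200] (xmin, ymin, w, h).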
def load_image(image_path):
return cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def draw_bboxes(img, bboxes, classes, colors = None, show_classes = None, bbox_format = 'yolo', class_name = False, line_thickness = 1):
image = img.copy()
show_classes = classes if show_classes is None else show_classes
colors = (0, 255 ,0) if colors is None else colors
if bbox_format == 'yolo':
for idx in range(len(bboxes)):
bbox = bboxes[idx]
cls = classes[idx]
color = colors[idx]
if cls in show_classes:
x1 = round(float(bbox[0])*image.shape[1])
y1 = round(float(bbox[1])*image.shape[0])
w = round(float(bbox[2])*image.shape[1]/2) #w/2
h = round(float(bbox[3])*image.shape[0]/2)
voc_bbox = (x1-w, y1-h, x1+w, y1+h)
plot_one_box(voc_bbox,
image,
color = color,
label = cls if class_name else str(get_label(cls)),
line_thickness = line_thickness)
elif bbox_format == 'coco':
for idx in range(len(bboxes)):
bbox = bboxes[idx]
cls = classes[idx]
color = colors[idx]
if cls in show_classes:
x1 = int(round(bbox[0]))
y1 = int(round(bbox[1]))
w = int(round(bbox[2]))
h = int(round(bbox[3]))
voc_bbox = (x1, y1, x1+w, y1+h)
plot_one_box(voc_bbox,
image,
color = color,
label = cls,
line_thickness = line_thickness)
elif bbox_format == 'voc_pascal':
for idx in range(len(bboxes)):
bbox = bboxes[idx]
cls = classes[idx]
cls_id = class_ids[idx]
color = colors[cls_id] if type(colors) is list else colors
if cls in show_classes:
x1 = int(round(bbox[0]))
y1 = int(round(bbox[1]))
x2 = int(round(bbox[2]))
y2 = int(round(bbox[3]))
voc_bbox = (x1, y1, x2, y2)
plot_one_box(voc_bbox,
image,
color = color,
label = cls if class_name else str(cls_id),
line_thickness = line_thickness)
else:
raise ValueError('wrong bbox format')
return image
def get_bbox(annots):
bboxes = [list(annot.values()) for annot in annots]
return bboxes
def get_imgsize(row):
row['width'], row['height'] = imagesize.get(row['image_path'])
return row
# https://www.kaggle.com/diegoalejogm/great-barrier-reefs-eda-with-animations
def create_animation(ims):
fig = plt.figure(figsize=(16, 12))
plt.axis('off')
im = plt.imshow(ims[0])
def animate_func(i):
im.set_array(ims[i])
return [im]
return animation.FuncAnimation(fig, animate_func, frames = len(ims), interval = 1000//12)
# https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py
def nms(dets, thresh):
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
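# Worked example: two 11x11 boxes offset by one pixel overlap with IoU ~ 0.70, so with
# thresh=0.5 only the higher-scoring first box survives:
#   nms(np.array([[0, 0, 10, 10, 0.9], [1, 1, 11, 11, 0.8]]), 0.5)   # -> [0]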
# https://github.com/DocF/Soft-NMS/blob/master/soft_nms.py
def py_cpu_softnms(dets, sc, Nt=0.3, sigma=0.5, thresh=0.001, method=2):
"""
py_cpu_softnms
    :param dets: box coordinate matrix in [y1, x1, y2, x2] format
    :param sc: confidence score of each box
    :param Nt: IoU overlap threshold
    :param sigma: variance of the Gaussian weighting function
    :param thresh: final score threshold
    :param method: weighting method (1: linear, 2: gaussian, 3: original NMS)
    :return: indexes of the boxes that are kept
"""
# indexes concatenate boxes with the last column
N = dets.shape[0]
indexes = np.array([np.arange(N)])
dets = np.concatenate((dets, indexes.T), axis=1)
# the order of boxes coordinate is [y1,x1,y2,x2]
y1 = dets[:, 0]
x1 = dets[:, 1]
y2 = dets[:, 2]
x2 = dets[:, 3]
scores = sc
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
for i in range(N):
# intermediate parameters for later parameters exchange
tBD = dets[i, :].copy()
tscore = scores[i].copy()
tarea = areas[i].copy()
pos = i + 1
#
if i != N-1:
maxscore = np.max(scores[pos:], axis=0)
maxpos = np.argmax(scores[pos:], axis=0)
else:
maxscore = scores[-1]
maxpos = 0
if tscore < maxscore:
dets[i, :] = dets[maxpos + i + 1, :]
dets[maxpos + i + 1, :] = tBD
tBD = dets[i, :]
scores[i] = scores[maxpos + i + 1]
scores[maxpos + i + 1] = tscore
tscore = scores[i]
areas[i] = areas[maxpos + i + 1]
areas[maxpos + i + 1] = tarea
tarea = areas[i]
# IoU calculate
xx1 = np.maximum(dets[i, 1], dets[pos:, 1])
yy1 = np.maximum(dets[i, 0], dets[pos:, 0])
xx2 = np.minimum(dets[i, 3], dets[pos:, 3])
yy2 = np.minimum(dets[i, 2], dets[pos:, 2])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[pos:] - inter)
# Three methods: 1.linear 2.gaussian 3.original NMS
if method == 1: # linear
weight = np.ones(ovr.shape)
weight[ovr > Nt] = weight[ovr > Nt] - ovr[ovr > Nt]
elif method == 2: # gaussian
weight = np.exp(-(ovr * ovr) / sigma)
else: # original NMS
weight = np.ones(ovr.shape)
weight[ovr > Nt] = 0
scores[pos:] = weight * scores[pos:]
# select the boxes and keep the corresponding indexes
inds = dets[:, 4][scores > thresh]
keep = inds.astype(int)
return keep
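# With method=2 (the default) a box that overlaps an already-selected box by IoU keeps
# its score scaled by exp(-IoU**2 / sigma) rather than being dropped outright; any box
# whose decayed score ends up below `thresh` is excluded from `keep`.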
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
def create_logger(filename, filemode='a'):
# better logging file - output the in terminal as well
file_handler = logging.FileHandler(filename=filename, mode=filemode)
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [file_handler, stdout_handler]
formatter = "%(asctime)s %(levelname)s: %(message)s"
datefmt = "%m/%d/%Y %I:%M:%S %p"
logging.basicConfig(format=formatter, datefmt=datefmt,
level=logging.DEBUG, handlers=handlers)
return
def save_pickle(obj, folder_path):
pickle.dump(obj, open(folder_path, 'wb'), pickle.HIGHEST_PROTOCOL)
def load_pickle(folder_path):
return pickle.load(open(folder_path, 'rb'))
def save_yaml(obj, folder_path):
obj2 = obj.copy()
for key, value in obj2.items():
if isinstance(value, Path):
obj2[key] = str(value.resolve())
else:
obj2[key] = value
with open(folder_path, 'w') as file:
yaml.dump(obj2, file)
def load_yaml(folder_path):
with open(folder_path) as file:
data = yaml.load(file, Loader=yaml.FullLoader)
return data
def load_model(params):
try:
model = torch.hub.load(params['repo'],
'custom',
path=params['ckpt_path'],
source='local',
force_reload=True) # local repo
    except Exception:  # fall back if the local hub repo cannot be loaded
print("torch.hub.load failed, try torch.load")
model = torch.load(params['ckpt_path'])
model.conf = params['conf'] # NMS confidence threshold
model.iou = params['iou'] # NMS IoU threshold
model.classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for persons, cats and dogs
model.multi_label = False # NMS multiple labels per box
model.max_det = 50 # maximum number of detections per image
return model
def predict(model, img, size=768, augment=False, use_sahi=False):
if use_sahi:
from sahi.predict import get_sliced_prediction
results = get_sliced_prediction(
img,
model,
slice_height = 512,
slice_width = 512,
overlap_height_ratio = 0.2,
overlap_width_ratio = 0.2
)
preds = results.object_prediction_list
bboxes = np.array([pred.bbox.to_voc_bbox() for pred in preds])
else:
results = model(img, size=size, augment=augment) # custom inference size
preds = results.pandas().xyxy[0]
bboxes = preds[['xmin','ymin','xmax','ymax']].values
if len(bboxes):
height, width = img.shape[:2]
bboxes = voc2coco(bboxes,height,width).astype(int)
if use_sahi:
confs = np.array([pred.score.value for pred in preds])
else:
confs = preds.confidence.values
return bboxes, confs
else:
return np.array([]),[]
def format_prediction(bboxes, confs):
annot = ''
if len(bboxes)>0:
for idx in range(len(bboxes)):
xmin, ymin, w, h = bboxes[idx]
conf = confs[idx]
annot += f'{conf} {xmin} {ymin} {w} {h}'
annot +=' '
annot = annot.strip(' ')
return annot
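# Example: format_prediction(np.array([[10, 20, 30, 40]]), [0.9]) returns the
# submission-style string '0.9 10 20 30 40' (confidence followed by the COCO-format box).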
def show_img(img, bboxes, confs, colors, bbox_format='yolo'):
labels = [str(round(conf,2)) for conf in confs]
img = draw_bboxes(img = img,
bboxes = bboxes,
classes = labels,
class_name = True,
colors = colors,
bbox_format = bbox_format,
line_thickness = 2)
return Image.fromarray(img)
def write_hyp(params):
with open(params["hyp_file"], mode="w") as f:
for key, val in params["hyp_param"].items():
f.write(f"{key}: {val}\n")
def class2dict(f):
return dict((name, getattr(f, name)) for name in dir(f) if not name.startswith('__'))
def upload(params):
data_version = "-".join(params["exp_name"].split("_"))
if os.path.exists(params["output_dir"] / "wandb"):
shutil.move(str(params["output_dir"] / "wandb"),
str(params["output_dir"].parent / f"{params['exp_name']}_wandb/")
)
with open(params["output_dir"] / "dataset-metadata.json", "w") as f:
f.write("{\n")
f.write(f""" "title": "{data_version}",\n""")
f.write(f""" "id": "vincentwang25/{data_version}",\n""")
f.write(""" "licenses": [\n""")
f.write(""" {\n""")
f.write(""" "name": "CC0-1.0"\n""")
f.write(""" }\n""")
f.write(""" ]\n""")
f.write("""}""")
subprocess.call(["kaggle", "datasets", "create", "-p", str(params["output_dir"]), "-r", "zip"])
def coco(df):
annotion_id = 0
images = []
annotations = []
categories = [{'id': 0, 'name': 'cots'}]
for i, row in df.iterrows():
images.append({
"id": i,
"file_name": f"video_{row['video_id']}_{row['video_frame']}.jpg",
"height": 720,
"width": 1280,
})
for bbox in row['annotations']:
annotations.append({
"id": annotion_id,
"image_id": i,
"category_id": 0,
"bbox": list(bbox.values()),
"area": bbox['width'] * bbox['height'],
"segmentation": [],
"iscrowd": 0
})
annotion_id += 1
json_file = {'categories':categories, 'images':images, 'annotations':annotations}
return json_file
def mmcfg_from_param(params):
from mmcv import Config
# model
cfg = Config.fromfile(params['hyp_param']['base_file'])
cfg.work_dir = str(params['output_dir'])
cfg.seed = 2022
cfg.gpu_ids = range(2)
cfg.load_from = params['hyp_param']['load_from']
if params['hyp_param']['model_type'] == 'faster_rcnn':
cfg.model.roi_head.bbox_head.num_classes = 1
cfg.model.roi_head.bbox_head.loss_bbox.type = params['hyp_param']['loss_fnc']
cfg.model.rpn_head.loss_bbox.type = params['hyp_param']['loss_fnc']
if params['hyp_param']['loss_fnc'] == "GIoULoss":
cfg.model.roi_head.bbox_head.reg_decoded_bbox = True
cfg.model.rpn_head.reg_decoded_bbox = True
cfg.model.train_cfg.rpn_proposal.nms.type = params['hyp_param']['nms']
cfg.model.test_cfg.rpn.nms.type = params['hyp_param']['nms']
cfg.model.test_cfg.rcnn.nms.type = params['hyp_param']['nms']
cfg.model.train_cfg.rcnn.sampler.type = params['hyp_param']['sampler']
elif params['hyp_param']['model_type'] == 'swin':
pass # already changed
elif params['hyp_param']['model_type'] == 'vfnet':
cfg.model.bbox_head.num_classes = 1
if params['hyp_param'].get("optimizer", cfg.optimizer.type) == "AdamW":
cfg.optimizer = dict(
type="AdamW",
lr=params['hyp_param'].get("lr", cfg.optimizer.lr),
weight_decay=params['hyp_param'].get(
"weight_decay", cfg.optimizer.weight_decay
),
)
else:
cfg.optimizer.lr = params['hyp_param'].get("lr", cfg.optimizer.lr)
cfg.optimizer.weight_decay = params['hyp_param'].get(
"weight_decay", cfg.optimizer.weight_decay)
cfg.lr_config = dict(
policy='CosineAnnealing',
by_epoch=False,
warmup='linear',
warmup_iters= 1000,
warmup_ratio= 1/10,
min_lr=1e-07)
# data
cfg = add_data_pipeline(cfg, params)
cfg.runner.max_epochs = params['epochs']
cfg.evaluation.start = 1
cfg.evaluation.interval = 1
cfg.evaluation.save_best='auto'
cfg.evaluation.metric ='bbox'
cfg.checkpoint_config.interval = -1
cfg.log_config.interval = 500
cfg.log_config.with_step = True
cfg.log_config.by_epoch = True
cfg.log_config.hooks =[dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')]
cfg.workflow = [('train',1)]
logging.info(str(cfg))
return cfg
def add_data_pipeline(cfg, params):
cfg.dataset_type = 'COCODataset'
cfg.classes = ('cots',)
cfg.data_root = str(params['data_path'].resolve())
params['aug_param']['img_scale'] = (params['img_size'], params['img_size'])
cfg.img_scale = params['aug_param']['img_scale']
cfg.dataset_type = 'CocoDataset'
cfg.filter_empty_gt = False
cfg.data.filter_empty_gt = False
cfg.data.train.type = cfg.dataset_type
cfg.data.train.classes = cfg.classes
cfg.data.train.ann_file = str(params["cfg_dir"] / 'annotations_train.json')
cfg.data.train.img_prefix = cfg.data_root + '/images/'
cfg.data.train.filter_empty_gt = False
cfg.data.test.type = cfg.dataset_type
cfg.data.test.classes = cfg.classes
cfg.data.test.ann_file = str(params["cfg_dir"] / 'annotations_valid.json')
cfg.data.test.img_prefix = cfg.data_root + '/images/'
cfg.data.test.filter_empty_gt = False
cfg.data.val.type = cfg.dataset_type
cfg.data.val.classes = cfg.classes
cfg.data.val.ann_file = str(params["cfg_dir"] / 'annotations_valid.json')
cfg.data.val.img_prefix = cfg.data_root + '/images/'
cfg.data.val.filter_empty_gt = False
cfg.data.samples_per_gpu = params['batch'] // len(cfg.gpu_ids)
cfg.data.workers_per_gpu = params['workers'] // len(cfg.gpu_ids)
# train pipeline
albu_train_transforms = get_albu_transforms(params['aug_param'], is_train=True)
if params['aug_param']['use_mixup'] or params['aug_param']['use_mosaic']:
train_pipeline = []
else:
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)]
if params['aug_param']['use_mosaic']:
train_pipeline.append(dict(type='Mosaic', img_scale=cfg.img_scale, pad_val=114.0))
else:
train_pipeline.append(dict(type='Resize', img_scale=cfg.img_scale, keep_ratio=False))
train_pipeline = train_pipeline +[
dict(type='Pad', size_divisor=32),
dict(
type='Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_labels'],
min_visibility=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_bboxes': 'bboxes'
},
update_pad_shape=False,
skip_img_without_anno=False
)]
if params['aug_param']['use_mixup']:
train_pipeline.append(dict(type='MixUp', img_scale=cfg.img_scale, ratio_range=(0.8, 1.6), pad_val=114.0))
train_pipeline = train_pipeline +\
[
dict(type='Normalize', **cfg.img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels'],
meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'img_norm_cfg')),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=cfg.img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **cfg.img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=[cfg.img_scale],
flip=[False],
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Pad', size_divisor=32),
dict(type='RandomFlip', direction='horizontal'),
dict(type='Normalize', **cfg.img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
cfg.train_pipeline = train_pipeline
cfg.val_pipeline = val_pipeline
cfg.test_pipeline = test_pipeline
if params['aug_param']['use_mixup'] or params['aug_param']['use_mosaic']:
cfg.train_dataset = dict(
type='MultiImageMixDataset',
dataset=dict(
type=cfg.dataset_type,
classes=cfg.classes,
ann_file=str(params["cfg_dir"] / 'annotations_train.json'),
img_prefix=cfg.data_root + '/images/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
],
filter_empty_gt=False,
),
pipeline=cfg.train_pipeline
)
cfg.data.train = cfg.train_dataset
else:
cfg.data.train.pipeline = cfg.train_pipeline
cfg.data.val.pipeline = cfg.val_pipeline
cfg.data.test.pipeline = cfg.test_pipeline
return cfg
def find_ckp(output_dir):
return glob(output_dir / "best*.pth")[0]
|
def calc_is_correct(gt_bboxes, pred_bboxes, iou_th=0.5):
"""
gt_bboxes: (N, 4) np.array in xywh format
pred_bboxes: (N, 5) np.array in conf+xywh format
"""
if len(gt_bboxes) == 0 and len(pred_bboxes) == 0:
tps, fps, fns = 0, 0, 0
return tps, fps, fns
elif len(gt_bboxes) == 0:
tps, fps, fns = 0, len(pred_bboxes), 0
return tps, fps, fns
elif len(pred_bboxes) == 0:
tps, fps, fns = 0, 0, len(gt_bboxes)
return tps, fps, fns
pred_bboxes = pred_bboxes[pred_bboxes[:,0].argsort()[::-1]] # sort by conf
tps, fps, fns = 0, 0, 0
tp, fp, fn = calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes, iou_th)
tps += tp
fps += fp
fns += fn
return tps, fps, fns
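# Illustrative usage sketch (added for clarity; `_demo_calc_is_correct` is not
# part of the original file): one ground-truth box and one heavily overlapping
# prediction should yield a single true positive at IoU 0.5. Assumes numpy is
# imported as np and calc_is_correct_at_iou_th/calc_iou are defined as in the
# full module below.
def _demo_calc_is_correct():
    gt = np.array([[10., 10., 50., 50.]])         # one box, xywh
    pred = np.array([[0.9, 12., 11., 50., 48.]])  # conf + xywh, IoU ~0.89 with gt
    return calc_is_correct(gt, pred, iou_th=0.5)  # expected: (1, 0, 0)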
| 125 | 149 |
import sys
import cv2
import os
from ast import literal_eval
from pathlib import Path
import shutil
import logging
import random
import pickle
import yaml
import subprocess
from PIL import Image
from glob import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import animation, rc
plt.rcParams['figure.figsize'] = 30, 30
np.set_printoptions(precision=3, suppress=True)
rc('animation', html='jshtml')
import torch
from augmentations import get_albu_transforms
IMAGE_DIR = '~/Kaggle/data/tensorflow-great-barrier-reef/train_images'
def load_image(video_id, video_frame, image_dir):
img_path = f'{image_dir}/video_{video_id}/{video_frame}.jpg'
assert os.path.exists(img_path), f'{img_path} does not exist.'
img = cv2.imread(img_path)
return img
def decode_annotations(annotations_str):
    """Decode the annotations string into a list of dicts."""
    return literal_eval(annotations_str)
def load_image_with_annotations(video_id, video_frame, image_dir, annotations_str):
    img = load_image(video_id, video_frame, image_dir)
    annotations = decode_annotations(annotations_str)
if len(annotations) > 0:
for ann in annotations:
cv2.rectangle(img, (ann['x'], ann['y']),
(ann['x'] + ann['width'], ann['y'] + ann['height']),
(255, 0, 0), thickness=2,)
return img
def draw_predictions(img, pred_bboxes):
img = img.copy()
if len(pred_bboxes) > 0:
for bbox in pred_bboxes:
conf = bbox[0]
x, y, w, h = bbox[1:].round().astype(int)
cv2.rectangle(img, (x, y),(x+w, y+h),(0, 255, 255), thickness=2,)
cv2.putText(img, f"{conf:.2}",(x, max(0, y-5)),
cv2.FONT_HERSHEY_SIMPLEX,0.5,(0, 0, 255),
thickness=1,
)
return img
def plot_img(df, idx, image_dir, pred_bboxes=None):
row = df.iloc[idx]
video_id = row.video_id
video_frame = row.video_frame
annotations_str = row.annotations
img = load_image_with_annotations(video_id, video_frame, image_dir, annotations_str)
    if pred_bboxes is not None and len(pred_bboxes) > 0:
pred_bboxes = pred_bboxes[pred_bboxes[:,0].argsort()[::-1]] # sort by conf
img = draw_predictions(img, pred_bboxes)
plt.imshow(img[:, :, ::-1])
def calc_iou(bboxes1, bboxes2, bbox_mode='xywh'):
assert len(bboxes1.shape) == 2 and bboxes1.shape[1] == 4
assert len(bboxes2.shape) == 2 and bboxes2.shape[1] == 4
bboxes1 = bboxes1.copy()
bboxes2 = bboxes2.copy()
if bbox_mode == 'xywh':
bboxes1[:, 2:] += bboxes1[:, :2]
bboxes2[:, 2:] += bboxes2[:, :2]
x11, y11, x12, y12 = np.split(bboxes1, 4, axis=1)
x21, y21, x22, y22 = np.split(bboxes2, 4, axis=1)
xA = np.maximum(x11, np.transpose(x21))
yA = np.maximum(y11, np.transpose(y21))
xB = np.minimum(x12, np.transpose(x22))
yB = np.minimum(y12, np.transpose(y22))
interArea = np.maximum((xB - xA + 1e-9), 0) * np.maximum((yB - yA + 1e-9), 0)
boxAArea = (x12 - x11 + 1e-9) * (y12 - y11 + 1e-9)
boxBArea = (x22 - x21 + 1e-9) * (y22 - y21 + 1e-9)
iou = interArea / (boxAArea + np.transpose(boxBArea) - interArea)
return iou
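# Illustrative sketch (not part of the original file): calc_iou on two xywh
# boxes that overlap by a quarter of their area returns roughly 0.14.
def _demo_calc_iou():
    a = np.array([[0., 0., 10., 10.]])
    b = np.array([[5., 5., 10., 10.]])
    return calc_iou(a, b, bbox_mode='xywh')  # ~0.14 (25 px overlap / 175 px union)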
def f_beta(tp, fp, fn, beta=2):
if tp == 0:
return 0
return (1+beta**2)*tp / ((1+beta**2)*tp + beta**2*fn+fp)
def calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes, iou_th, verbose=False):
gt_bboxes = gt_bboxes.copy()
pred_bboxes = pred_bboxes.copy()
tp = 0
fp = 0
for k, pred_bbox in enumerate(pred_bboxes): # fixed in ver.7
if len(gt_bboxes) == 0:
fp += len(pred_bboxes) - k # fix in ver.7
break
ious = calc_iou(gt_bboxes, pred_bbox[None, 1:])
max_iou = ious.max()
if max_iou >= iou_th:
tp += 1
gt_bboxes = np.delete(gt_bboxes, ious.argmax(), axis=0)
else:
fp += 1
fn = len(gt_bboxes)
return tp, fp, fn
def calc_is_correct(gt_bboxes, pred_bboxes, iou_th=0.5):
"""
gt_bboxes: (N, 4) np.array in xywh format
pred_bboxes: (N, 5) np.array in conf+xywh format
"""
if len(gt_bboxes) == 0 and len(pred_bboxes) == 0:
tps, fps, fns = 0, 0, 0
return tps, fps, fns
elif len(gt_bboxes) == 0:
tps, fps, fns = 0, len(pred_bboxes), 0
return tps, fps, fns
elif len(pred_bboxes) == 0:
tps, fps, fns = 0, 0, len(gt_bboxes)
return tps, fps, fns
pred_bboxes = pred_bboxes[pred_bboxes[:,0].argsort()[::-1]] # sort by conf
tps, fps, fns = 0, 0, 0
tp, fp, fn = calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes, iou_th)
tps += tp
fps += fp
fns += fn
return tps, fps, fns
def calc_f2_score(gt_bboxes_list, pred_bboxes_list, verbose=False):
"""
gt_bboxes_list: list of (N, 4) np.array in xywh format
pred_bboxes_list: list of (N, 5) np.array in conf+xywh format
"""
#f2s = []
f2_dict = {'f2':0, "P":0, "R": 0}
all_tps = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
all_fps = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
all_fns = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
for k, iou_th in enumerate(np.arange(0.3, 0.85, 0.05)):
tps, fps, fns = 0, 0, 0
for i, (gt_bboxes, pred_bboxes) in enumerate(zip(gt_bboxes_list, pred_bboxes_list)):
tp, fp, fn = calc_is_correct(gt_bboxes, pred_bboxes, iou_th)
tps += tp
fps += fp
fns += fn
all_tps[i][k] = tp
all_fps[i][k] = fp
all_fns[i][k] = fn
if verbose:
num_gt = len(gt_bboxes)
num_pred = len(pred_bboxes)
print(f'num_gt:{num_gt:<3} num_pred:{num_pred:<3} tp:{tp:<3} fp:{fp:<3} fn:{fn:<3}')
f2 = f_beta(tps, fps, fns, beta=2)
precision = f_beta(tps, fps, fns, beta=0)
recall = f_beta(tps, fps, fns, beta=100)
f2_dict["f2_" + str(round(iou_th,3))] = f2
f2_dict["P_" + str(round(iou_th,3))] = precision
f2_dict["R_" + str(round(iou_th,3))] = recall
f2_dict['f2'] += f2 / 11
f2_dict['P'] += precision / 11
f2_dict['R'] += recall / 11
f2_dict["tps"] = all_tps
f2_dict["fps"] = all_fps
f2_dict["fns"] = all_fns
return f2_dict
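# Illustrative sketch (not part of the original file): for a single image whose
# one prediction matches the one ground-truth box at every IoU threshold in the
# 0.3-0.8 sweep, the averaged F2, precision and recall all come out at ~1.0.
def _demo_calc_f2_score():
    gt_list = [np.array([[10., 10., 50., 50.]])]
    pred_list = [np.array([[0.9, 12., 11., 50., 48.]])]
    d = calc_f2_score(gt_list, pred_list)
    return d['f2'], d['P'], d['R']  # expected: (1.0, 1.0, 1.0)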
def print_f2_dict(d):
    print("Overall f2: {:.3f}, precision {:.3f}, recall {:.3f}".format(d['f2'], d['P'], d['R']))
    for k, iou_th in enumerate(np.arange(0.3, 0.85, 0.05)):
        print(f"IOU {iou_th:.2f}:", end=" ")
        print("f2: {:.3f}, precision {:.3f}, recall {:.3f}".format(d["f2_" + str(round(iou_th,3))],
                                                                   d["P_" + str(round(iou_th,3))],
                                                                   d["R_" + str(round(iou_th,3))]))
def get_path(row, params, infer=False):
row['old_image_path'] = params['root_dir'] / f'train_images/video_{row.video_id}/{row.video_frame}.jpg'
if infer:
row['image_path'] = row["old_image_path"]
else:
row['image_path'] = params['image_dir'] / f'video_{row.video_id}_{row.video_frame}.jpg'
row['label_path'] = params['label_dir'] / f'video_{row.video_id}_{row.video_frame}.txt'
return row
def make_copy(path, params):
    # use pathlib attributes instead of string splitting, so this no longer
    # depends on '/' being the path separator
    path = Path(path)
    filename = path.name
    video_id = path.parent.name
new_path = params["image_dir"] / f'{video_id}_{filename}'
shutil.copy(path, new_path)
return
# https://www.kaggle.com/awsaf49/great-barrier-reef-yolov5-train
def voc2yolo(image_height, image_width, bboxes):
"""
    voc  => [x1, y1, x2, y2]
    yolo => [xmid, ymid, w, h] (normalized)
    """
    bboxes = bboxes.copy().astype(float)  # cast to float; otherwise integer boxes would normalize to all zeros
bboxes[..., [0, 2]] = bboxes[..., [0, 2]]/ image_width
bboxes[..., [1, 3]] = bboxes[..., [1, 3]]/ image_height
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
bboxes[..., 0] = bboxes[..., 0] + w/2
bboxes[..., 1] = bboxes[..., 1] + h/2
bboxes[..., 2] = w
bboxes[..., 3] = h
return bboxes
def yolo2voc(image_height, image_width, bboxes):
"""
    yolo => [xmid, ymid, w, h] (normalized)
    voc  => [x1, y1, x2, y2]
    """
    bboxes = bboxes.copy().astype(float)  # cast to float to avoid integer truncation
bboxes[..., [0, 2]] = bboxes[..., [0, 2]]* image_width
bboxes[..., [1, 3]] = bboxes[..., [1, 3]]* image_height
bboxes[..., [0, 1]] = bboxes[..., [0, 1]] - bboxes[..., [2, 3]]/2
bboxes[..., [2, 3]] = bboxes[..., [0, 1]] + bboxes[..., [2, 3]]
return bboxes
def coco2yolo(image_height, image_width, bboxes):
"""
coco => [xmin, ymin, w, h]
yolo => [xmid, ymid, w, h] (normalized)
"""
    bboxes = bboxes.copy().astype(float)  # cast to float; otherwise integer boxes would normalize to all zeros
    # normalize
    bboxes[..., [0, 2]] = bboxes[..., [0, 2]] / image_width
    bboxes[..., [1, 3]] = bboxes[..., [1, 3]] / image_height
    # convert (xmin, ymin) => (xmid, ymid)
    bboxes[..., [0, 1]] = bboxes[..., [0, 1]] + bboxes[..., [2, 3]] / 2
return bboxes
def yolo2coco(image_height, image_width, bboxes):
"""
yolo => [xmid, ymid, w, h] (normalized)
coco => [xmin, ymin, w, h]
"""
    bboxes = bboxes.copy().astype(float)  # cast to float to avoid integer truncation
    # denormalize
    bboxes[..., [0, 2]] = bboxes[..., [0, 2]] * image_width
    bboxes[..., [1, 3]] = bboxes[..., [1, 3]] * image_height
    # convert (xmid, ymid) => (xmin, ymin)
    bboxes[..., [0, 1]] = bboxes[..., [0, 1]] - bboxes[..., [2, 3]] / 2
return bboxes
def voc2coco(bboxes, image_height=720, image_width=1280):
bboxes = voc2yolo(image_height, image_width, bboxes)
bboxes = yolo2coco(image_height, image_width, bboxes)
return bboxes
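# Illustrative sketch (not part of the original file): the conversions above are
# inverses of each other, so a voc -> yolo -> voc round trip recovers the input.
def _demo_bbox_round_trip():
    voc = np.array([[100., 200., 300., 400.]])  # x1, y1, x2, y2
    yolo = voc2yolo(720, 1280, voc)             # normalized xmid, ymid, w, h
    back = yolo2voc(720, 1280, yolo)
    return np.allclose(voc, back)               # expected: True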
def load_image(image_path):
return cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def draw_bboxes(img, bboxes, classes, colors = None, show_classes = None, bbox_format = 'yolo', class_name = False, line_thickness = 1):
image = img.copy()
show_classes = classes if show_classes is None else show_classes
colors = (0, 255 ,0) if colors is None else colors
if bbox_format == 'yolo':
for idx in range(len(bboxes)):
bbox = bboxes[idx]
cls = classes[idx]
color = colors[idx]
if cls in show_classes:
x1 = round(float(bbox[0])*image.shape[1])
y1 = round(float(bbox[1])*image.shape[0])
w = round(float(bbox[2])*image.shape[1]/2) #w/2
h = round(float(bbox[3])*image.shape[0]/2)
voc_bbox = (x1-w, y1-h, x1+w, y1+h)
plot_one_box(voc_bbox,
image,
color = color,
label = cls if class_name else str(get_label(cls)),
line_thickness = line_thickness)
elif bbox_format == 'coco':
for idx in range(len(bboxes)):
bbox = bboxes[idx]
cls = classes[idx]
color = colors[idx]
if cls in show_classes:
x1 = int(round(bbox[0]))
y1 = int(round(bbox[1]))
w = int(round(bbox[2]))
h = int(round(bbox[3]))
voc_bbox = (x1, y1, x1+w, y1+h)
plot_one_box(voc_bbox,
image,
color = color,
label = cls,
line_thickness = line_thickness)
elif bbox_format == 'voc_pascal':
for idx in range(len(bboxes)):
bbox = bboxes[idx]
cls = classes[idx]
cls_id = class_ids[idx]
color = colors[cls_id] if type(colors) is list else colors
if cls in show_classes:
x1 = int(round(bbox[0]))
y1 = int(round(bbox[1]))
x2 = int(round(bbox[2]))
y2 = int(round(bbox[3]))
voc_bbox = (x1, y1, x2, y2)
plot_one_box(voc_bbox,
image,
color = color,
label = cls if class_name else str(cls_id),
line_thickness = line_thickness)
else:
raise ValueError('wrong bbox format')
return image
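# Illustrative sketch (not part of the original file): draw a single COCO-format
# box with its label onto a blank canvas. Assumes cv2 and np are imported as in
# this module.
def _demo_draw_bboxes():
    canvas = np.zeros((720, 1280, 3), dtype=np.uint8)
    boxes = [[100, 100, 200, 150]]  # xmin, ymin, w, h
    return draw_bboxes(canvas, boxes, classes=['cots'], colors=[(0, 255, 0)],
                       bbox_format='coco', class_name=True, line_thickness=2)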
def get_bbox(annots):
bboxes = [list(annot.values()) for annot in annots]
return bboxes
def get_imgsize(row):
row['width'], row['height'] = imagesize.get(row['image_path'])
return row
# https://www.kaggle.com/diegoalejogm/great-barrier-reefs-eda-with-animations
def create_animation(ims):
fig = plt.figure(figsize=(16, 12))
plt.axis('off')
im = plt.imshow(ims[0])
def animate_func(i):
im.set_array(ims[i])
return [im]
return animation.FuncAnimation(fig, animate_func, frames = len(ims), interval = 1000//12)
# https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py
def nms(dets, thresh):
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
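# Illustrative sketch (not part of the original file): greedy NMS keeps the
# highest-scoring box of an overlapping pair and leaves an isolated box alone.
def _demo_nms():
    dets = np.array([
        [100., 100., 200., 200., 0.9],
        [105., 105., 205., 205., 0.8],  # IoU ~0.82 with the first box -> suppressed
        [300., 300., 400., 400., 0.7],
    ])
    return nms(dets, thresh=0.5)  # expected: [0, 2]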
# https://github.com/DocF/Soft-NMS/blob/master/soft_nms.py
def py_cpu_softnms(dets, sc, Nt=0.3, sigma=0.5, thresh=0.001, method=2):
"""
    py_cpu_softnms (soft non-maximum suppression, CPU implementation)
    :param dets:   box coordinate matrix in [y1, x1, y2, x2] format
    :param sc:     score of each box
    :param Nt:     IoU overlap threshold
    :param sigma:  variance of the Gaussian weighting function
    :param thresh: final score threshold
    :param method: weighting method (1: linear, 2: gaussian, otherwise: original NMS)
    :return:       indexes of the boxes to keep
"""
# indexes concatenate boxes with the last column
N = dets.shape[0]
indexes = np.array([np.arange(N)])
dets = np.concatenate((dets, indexes.T), axis=1)
# the order of boxes coordinate is [y1,x1,y2,x2]
y1 = dets[:, 0]
x1 = dets[:, 1]
y2 = dets[:, 2]
x2 = dets[:, 3]
scores = sc
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
for i in range(N):
# intermediate parameters for later parameters exchange
tBD = dets[i, :].copy()
tscore = scores[i].copy()
tarea = areas[i].copy()
pos = i + 1
#
if i != N-1:
maxscore = np.max(scores[pos:], axis=0)
maxpos = np.argmax(scores[pos:], axis=0)
else:
maxscore = scores[-1]
maxpos = 0
if tscore < maxscore:
dets[i, :] = dets[maxpos + i + 1, :]
dets[maxpos + i + 1, :] = tBD
tBD = dets[i, :]
scores[i] = scores[maxpos + i + 1]
scores[maxpos + i + 1] = tscore
tscore = scores[i]
areas[i] = areas[maxpos + i + 1]
areas[maxpos + i + 1] = tarea
tarea = areas[i]
# IoU calculate
xx1 = np.maximum(dets[i, 1], dets[pos:, 1])
yy1 = np.maximum(dets[i, 0], dets[pos:, 0])
xx2 = np.minimum(dets[i, 3], dets[pos:, 3])
yy2 = np.minimum(dets[i, 2], dets[pos:, 2])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[pos:] - inter)
# Three methods: 1.linear 2.gaussian 3.original NMS
if method == 1: # linear
weight = np.ones(ovr.shape)
weight[ovr > Nt] = weight[ovr > Nt] - ovr[ovr > Nt]
elif method == 2: # gaussian
weight = np.exp(-(ovr * ovr) / sigma)
else: # original NMS
weight = np.ones(ovr.shape)
weight[ovr > Nt] = 0
scores[pos:] = weight * scores[pos:]
# select the boxes and keep the corresponding indexes
inds = dets[:, 4][scores > thresh]
keep = inds.astype(int)
return keep
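# Illustrative sketch (not part of the original file): with the Gaussian method,
# the overlapping lower-scoring box is not removed outright; its score decays
# (0.8 -> ~0.21 here) and then falls below the final threshold. Note that the
# function rescores `sc` in place.
def _demo_soft_nms():
    boxes = np.array([
        [100., 100., 200., 200.],   # [y1, x1, y2, x2], as the docstring specifies
        [105., 105., 205., 205.],
        [300., 300., 400., 400.],
    ])
    scores = np.array([0.9, 0.8, 0.7])
    return py_cpu_softnms(boxes, scores, Nt=0.3, sigma=0.5, thresh=0.25, method=2)  # expected: array([0, 2])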
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
def create_logger(filename, filemode='a'):
    # log to a file and echo everything to the terminal as well
file_handler = logging.FileHandler(filename=filename, mode=filemode)
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [file_handler, stdout_handler]
formatter = "%(asctime)s %(levelname)s: %(message)s"
datefmt = "%m/%d/%Y %I:%M:%S %p"
logging.basicConfig(format=formatter, datefmt=datefmt,
level=logging.DEBUG, handlers=handlers)
return
def save_pickle(obj, folder_path):
pickle.dump(obj, open(folder_path, 'wb'), pickle.HIGHEST_PROTOCOL)
def load_pickle(folder_path):
return pickle.load(open(folder_path, 'rb'))
def save_yaml(obj, folder_path):
obj2 = obj.copy()
for key, value in obj2.items():
if isinstance(value, Path):
obj2[key] = str(value.resolve())
else:
obj2[key] = value
with open(folder_path, 'w') as file:
yaml.dump(obj2, file)
def load_yaml(folder_path):
with open(folder_path) as file:
data = yaml.load(file, Loader=yaml.FullLoader)
return data
def load_model(params):
try:
model = torch.hub.load(params['repo'],
'custom',
path=params['ckpt_path'],
source='local',
force_reload=True) # local repo
    except Exception:
print("torch.hub.load failed, try torch.load")
model = torch.load(params['ckpt_path'])
model.conf = params['conf'] # NMS confidence threshold
model.iou = params['iou'] # NMS IoU threshold
model.classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for persons, cats and dogs
model.multi_label = False # NMS multiple labels per box
model.max_det = 50 # maximum number of detections per image
return model
def predict(model, img, size=768, augment=False, use_sahi=False):
if use_sahi:
from sahi.predict import get_sliced_prediction
results = get_sliced_prediction(
img,
model,
slice_height = 512,
slice_width = 512,
overlap_height_ratio = 0.2,
overlap_width_ratio = 0.2
)
preds = results.object_prediction_list
bboxes = np.array([pred.bbox.to_voc_bbox() for pred in preds])
else:
results = model(img, size=size, augment=augment) # custom inference size
preds = results.pandas().xyxy[0]
bboxes = preds[['xmin','ymin','xmax','ymax']].values
if len(bboxes):
height, width = img.shape[:2]
bboxes = voc2coco(bboxes,height,width).astype(int)
if use_sahi:
confs = np.array([pred.score.value for pred in preds])
else:
confs = preds.confidence.values
return bboxes, confs
else:
return np.array([]),[]
def format_prediction(bboxes, confs):
annot = ''
if len(bboxes)>0:
for idx in range(len(bboxes)):
xmin, ymin, w, h = bboxes[idx]
conf = confs[idx]
annot += f'{conf} {xmin} {ymin} {w} {h}'
annot +=' '
annot = annot.strip(' ')
return annot
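# Illustrative sketch (not part of the original file): format_prediction turns
# per-image detections into the space-separated "conf xmin ymin w h" string.
def _demo_format_prediction():
    bboxes = np.array([[10, 20, 30, 40], [50, 60, 70, 80]])
    confs = np.array([0.9, 0.8])
    return format_prediction(bboxes, confs)  # '0.9 10 20 30 40 0.8 50 60 70 80'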
def show_img(img, bboxes, confs, colors, bbox_format='yolo'):
labels = [str(round(conf,2)) for conf in confs]
img = draw_bboxes(img = img,
bboxes = bboxes,
classes = labels,
class_name = True,
colors = colors,
bbox_format = bbox_format,
line_thickness = 2)
return Image.fromarray(img)
def write_hyp(params):
with open(params["hyp_file"], mode="w") as f:
for key, val in params["hyp_param"].items():
f.write(f"{key}: {val}\n")
def class2dict(f):
return dict((name, getattr(f, name)) for name in dir(f) if not name.startswith('__'))
def upload(params):
data_version = "-".join(params["exp_name"].split("_"))
if os.path.exists(params["output_dir"] / "wandb"):
shutil.move(str(params["output_dir"] / "wandb"),
str(params["output_dir"].parent / f"{params['exp_name']}_wandb/")
)
with open(params["output_dir"] / "dataset-metadata.json", "w") as f:
f.write("{\n")
f.write(f""" "title": "{data_version}",\n""")
f.write(f""" "id": "vincentwang25/{data_version}",\n""")
f.write(""" "licenses": [\n""")
f.write(""" {\n""")
f.write(""" "name": "CC0-1.0"\n""")
f.write(""" }\n""")
f.write(""" ]\n""")
f.write("""}""")
subprocess.call(["kaggle", "datasets", "create", "-p", str(params["output_dir"]), "-r", "zip"])
def coco(df):
    annotation_id = 0
images = []
annotations = []
categories = [{'id': 0, 'name': 'cots'}]
for i, row in df.iterrows():
images.append({
"id": i,
"file_name": f"video_{row['video_id']}_{row['video_frame']}.jpg",
"height": 720,
"width": 1280,
})
for bbox in row['annotations']:
            annotations.append({
                "id": annotation_id,
"image_id": i,
"category_id": 0,
"bbox": list(bbox.values()),
"area": bbox['width'] * bbox['height'],
"segmentation": [],
"iscrowd": 0
})
            annotation_id += 1
json_file = {'categories':categories, 'images':images, 'annotations':annotations}
return json_file
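# Illustrative sketch (not part of the original file): building the COCO-style
# dict from a one-row dataframe. Note that list(bbox.values()) relies on the
# annotation dicts being ordered as x, y, width, height.
def _demo_coco():
    df = pd.DataFrame({
        'video_id': [0],
        'video_frame': [16],
        'annotations': [[{'x': 100, 'y': 120, 'width': 50, 'height': 40}]],
    })
    return coco(df)  # one image entry, one annotation with bbox [100, 120, 50, 40]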
def mmcfg_from_param(params):
from mmcv import Config
# model
cfg = Config.fromfile(params['hyp_param']['base_file'])
cfg.work_dir = str(params['output_dir'])
cfg.seed = 2022
cfg.gpu_ids = range(2)
cfg.load_from = params['hyp_param']['load_from']
if params['hyp_param']['model_type'] == 'faster_rcnn':
cfg.model.roi_head.bbox_head.num_classes = 1
cfg.model.roi_head.bbox_head.loss_bbox.type = params['hyp_param']['loss_fnc']
cfg.model.rpn_head.loss_bbox.type = params['hyp_param']['loss_fnc']
if params['hyp_param']['loss_fnc'] == "GIoULoss":
cfg.model.roi_head.bbox_head.reg_decoded_bbox = True
cfg.model.rpn_head.reg_decoded_bbox = True
cfg.model.train_cfg.rpn_proposal.nms.type = params['hyp_param']['nms']
cfg.model.test_cfg.rpn.nms.type = params['hyp_param']['nms']
cfg.model.test_cfg.rcnn.nms.type = params['hyp_param']['nms']
cfg.model.train_cfg.rcnn.sampler.type = params['hyp_param']['sampler']
elif params['hyp_param']['model_type'] == 'swin':
pass # already changed
elif params['hyp_param']['model_type'] == 'vfnet':
cfg.model.bbox_head.num_classes = 1
if params['hyp_param'].get("optimizer", cfg.optimizer.type) == "AdamW":
cfg.optimizer = dict(
type="AdamW",
lr=params['hyp_param'].get("lr", cfg.optimizer.lr),
weight_decay=params['hyp_param'].get(
"weight_decay", cfg.optimizer.weight_decay
),
)
else:
cfg.optimizer.lr = params['hyp_param'].get("lr", cfg.optimizer.lr)
cfg.optimizer.weight_decay = params['hyp_param'].get(
"weight_decay", cfg.optimizer.weight_decay)
cfg.lr_config = dict(
policy='CosineAnnealing',
by_epoch=False,
warmup='linear',
warmup_iters= 1000,
warmup_ratio= 1/10,
min_lr=1e-07)
# data
cfg = add_data_pipeline(cfg, params)
cfg.runner.max_epochs = params['epochs']
cfg.evaluation.start = 1
cfg.evaluation.interval = 1
cfg.evaluation.save_best='auto'
cfg.evaluation.metric ='bbox'
cfg.checkpoint_config.interval = -1
cfg.log_config.interval = 500
cfg.log_config.with_step = True
cfg.log_config.by_epoch = True
cfg.log_config.hooks =[dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')]
cfg.workflow = [('train',1)]
logging.info(str(cfg))
return cfg
def add_data_pipeline(cfg, params):
cfg.dataset_type = 'COCODataset'
cfg.classes = ('cots',)
cfg.data_root = str(params['data_path'].resolve())
params['aug_param']['img_scale'] = (params['img_size'], params['img_size'])
cfg.img_scale = params['aug_param']['img_scale']
cfg.dataset_type = 'CocoDataset'
cfg.filter_empty_gt = False
cfg.data.filter_empty_gt = False
cfg.data.train.type = cfg.dataset_type
cfg.data.train.classes = cfg.classes
cfg.data.train.ann_file = str(params["cfg_dir"] / 'annotations_train.json')
cfg.data.train.img_prefix = cfg.data_root + '/images/'
cfg.data.train.filter_empty_gt = False
cfg.data.test.type = cfg.dataset_type
cfg.data.test.classes = cfg.classes
cfg.data.test.ann_file = str(params["cfg_dir"] / 'annotations_valid.json')
cfg.data.test.img_prefix = cfg.data_root + '/images/'
cfg.data.test.filter_empty_gt = False
cfg.data.val.type = cfg.dataset_type
cfg.data.val.classes = cfg.classes
cfg.data.val.ann_file = str(params["cfg_dir"] / 'annotations_valid.json')
cfg.data.val.img_prefix = cfg.data_root + '/images/'
cfg.data.val.filter_empty_gt = False
cfg.data.samples_per_gpu = params['batch'] // len(cfg.gpu_ids)
cfg.data.workers_per_gpu = params['workers'] // len(cfg.gpu_ids)
# train pipeline
albu_train_transforms = get_albu_transforms(params['aug_param'], is_train=True)
if params['aug_param']['use_mixup'] or params['aug_param']['use_mosaic']:
train_pipeline = []
else:
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)]
if params['aug_param']['use_mosaic']:
train_pipeline.append(dict(type='Mosaic', img_scale=cfg.img_scale, pad_val=114.0))
else:
train_pipeline.append(dict(type='Resize', img_scale=cfg.img_scale, keep_ratio=False))
train_pipeline = train_pipeline +[
dict(type='Pad', size_divisor=32),
dict(
type='Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_labels'],
min_visibility=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_bboxes': 'bboxes'
},
update_pad_shape=False,
skip_img_without_anno=False
)]
if params['aug_param']['use_mixup']:
train_pipeline.append(dict(type='MixUp', img_scale=cfg.img_scale, ratio_range=(0.8, 1.6), pad_val=114.0))
train_pipeline = train_pipeline +\
[
dict(type='Normalize', **cfg.img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels'],
meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'img_norm_cfg')),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=cfg.img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **cfg.img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=[cfg.img_scale],
flip=[False],
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Pad', size_divisor=32),
dict(type='RandomFlip', direction='horizontal'),
dict(type='Normalize', **cfg.img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
cfg.train_pipeline = train_pipeline
cfg.val_pipeline = val_pipeline
cfg.test_pipeline = test_pipeline
if params['aug_param']['use_mixup'] or params['aug_param']['use_mosaic']:
cfg.train_dataset = dict(
type='MultiImageMixDataset',
dataset=dict(
type=cfg.dataset_type,
classes=cfg.classes,
ann_file=str(params["cfg_dir"] / 'annotations_train.json'),
img_prefix=cfg.data_root + '/images/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
],
filter_empty_gt=False,
),
pipeline=cfg.train_pipeline
)
cfg.data.train = cfg.train_dataset
else:
cfg.data.train.pipeline = cfg.train_pipeline
cfg.data.val.pipeline = cfg.val_pipeline
cfg.data.test.pipeline = cfg.test_pipeline
return cfg
def find_ckp(output_dir):
return glob(output_dir / "best*.pth")[0]
|
GetChromeProxyRequestHeaderValue
|
Get a specific Chrome-Proxy request header value.
Returns:
The value for a specific Chrome-Proxy request header value for a
given key. Returns None if no such key is present.
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
from common import network_metrics
from telemetry.page import page_test
from telemetry.value import scalar
CHROME_PROXY_VIA_HEADER = 'Chrome-Compression-Proxy'
class ChromeProxyMetricException(page_test.MeasurementFailure):
pass
class ChromeProxyResponse(network_metrics.HTTPResponse):
""" Represents an HTTP response from a timeline event."""
def __init__(self, event):
super(ChromeProxyResponse, self).__init__(event)
def ShouldHaveChromeProxyViaHeader(self):
resp = self.response
# Ignore https and data url
if resp.url.startswith('https') or resp.url.startswith('data:'):
return False
# Ignore 304 Not Modified and cache hit.
if resp.status == 304 or resp.served_from_cache:
return False
# Ignore invalid responses that don't have any header. Log a warning.
if not resp.headers:
      logging.warning('response for %s does not have any headers '
'(refer=%s, status=%s)',
resp.url, resp.GetHeader('Referer'), resp.status)
return False
return True
def HasResponseHeader(self, key, value):
response_header = self.response.GetHeader(key)
if not response_header:
return False
values = [v.strip() for v in response_header.split(',')]
return any(v == value for v in values)
def HasRequestHeader(self, key, value):
if key not in self.response.request_headers:
return False
request_header = self.response.request_headers[key]
values = [v.strip() for v in request_header.split(',')]
return any(v == value for v in values)
def HasChromeProxyViaHeader(self):
via_header = self.response.GetHeader('Via')
if not via_header:
return False
vias = [v.strip(' ') for v in via_header.split(',')]
# The Via header is valid if it has a 4-character version prefix followed by
# the proxy name, for example, "1.1 Chrome-Compression-Proxy".
return any(v[4:] == CHROME_PROXY_VIA_HEADER for v in vias)
def HasExtraViaHeader(self, extra_header):
return self.HasResponseHeader('Via', extra_header)
def IsValidByViaHeader(self):
return (not self.ShouldHaveChromeProxyViaHeader() or
self.HasChromeProxyViaHeader())
# MASKED: GetChromeProxyRequestHeaderValue function (lines 71-87)
def GetChromeProxyClientType(self):
"""Get the client type directive from the Chrome-Proxy request header.
Returns:
The client type directive from the Chrome-Proxy request header for the
request that lead to this response. For example, if the request header
"Chrome-Proxy: c=android" is present, then this method would return
"android". Returns None if no client type directive is present.
"""
return self.GetChromeProxyRequestHeaderValue('c')
def HasChromeProxyLoFiRequest(self):
return self.HasRequestHeader('Chrome-Proxy', "q=low")
def HasChromeProxyLoFiResponse(self):
return self.HasResponseHeader('Chrome-Proxy', "q=low")
def HasChromeProxyPassThroughRequest(self):
return self.HasRequestHeader('Chrome-Proxy', "pass-through")
|
def GetChromeProxyRequestHeaderValue(self, key):
"""Get a specific Chrome-Proxy request header value.
Returns:
The value for a specific Chrome-Proxy request header value for a
given key. Returns None if no such key is present.
"""
if 'Chrome-Proxy' not in self.response.request_headers:
return None
chrome_proxy_request_header = self.response.request_headers['Chrome-Proxy']
values = [v.strip() for v in chrome_proxy_request_header.split(',')]
for value in values:
kvp = value.split('=', 1)
if len(kvp) == 2 and kvp[0].strip() == key:
return kvp[1].strip()
return None
| 71 | 87 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
from common import network_metrics
from telemetry.page import page_test
from telemetry.value import scalar
CHROME_PROXY_VIA_HEADER = 'Chrome-Compression-Proxy'
class ChromeProxyMetricException(page_test.MeasurementFailure):
pass
class ChromeProxyResponse(network_metrics.HTTPResponse):
""" Represents an HTTP response from a timeline event."""
def __init__(self, event):
super(ChromeProxyResponse, self).__init__(event)
def ShouldHaveChromeProxyViaHeader(self):
resp = self.response
# Ignore https and data url
if resp.url.startswith('https') or resp.url.startswith('data:'):
return False
# Ignore 304 Not Modified and cache hit.
if resp.status == 304 or resp.served_from_cache:
return False
# Ignore invalid responses that don't have any header. Log a warning.
if not resp.headers:
      logging.warning('response for %s does not have any headers '
'(refer=%s, status=%s)',
resp.url, resp.GetHeader('Referer'), resp.status)
return False
return True
def HasResponseHeader(self, key, value):
response_header = self.response.GetHeader(key)
if not response_header:
return False
values = [v.strip() for v in response_header.split(',')]
return any(v == value for v in values)
def HasRequestHeader(self, key, value):
if key not in self.response.request_headers:
return False
request_header = self.response.request_headers[key]
values = [v.strip() for v in request_header.split(',')]
return any(v == value for v in values)
def HasChromeProxyViaHeader(self):
via_header = self.response.GetHeader('Via')
if not via_header:
return False
vias = [v.strip(' ') for v in via_header.split(',')]
# The Via header is valid if it has a 4-character version prefix followed by
# the proxy name, for example, "1.1 Chrome-Compression-Proxy".
return any(v[4:] == CHROME_PROXY_VIA_HEADER for v in vias)
def HasExtraViaHeader(self, extra_header):
return self.HasResponseHeader('Via', extra_header)
def IsValidByViaHeader(self):
return (not self.ShouldHaveChromeProxyViaHeader() or
self.HasChromeProxyViaHeader())
def GetChromeProxyRequestHeaderValue(self, key):
"""Get a specific Chrome-Proxy request header value.
Returns:
The value for a specific Chrome-Proxy request header value for a
given key. Returns None if no such key is present.
"""
if 'Chrome-Proxy' not in self.response.request_headers:
return None
chrome_proxy_request_header = self.response.request_headers['Chrome-Proxy']
values = [v.strip() for v in chrome_proxy_request_header.split(',')]
for value in values:
kvp = value.split('=', 1)
if len(kvp) == 2 and kvp[0].strip() == key:
return kvp[1].strip()
return None
def GetChromeProxyClientType(self):
"""Get the client type directive from the Chrome-Proxy request header.
Returns:
The client type directive from the Chrome-Proxy request header for the
request that lead to this response. For example, if the request header
"Chrome-Proxy: c=android" is present, then this method would return
"android". Returns None if no client type directive is present.
"""
return self.GetChromeProxyRequestHeaderValue('c')
def HasChromeProxyLoFiRequest(self):
return self.HasRequestHeader('Chrome-Proxy', "q=low")
def HasChromeProxyLoFiResponse(self):
return self.HasResponseHeader('Chrome-Proxy', "q=low")
def HasChromeProxyPassThroughRequest(self):
return self.HasRequestHeader('Chrome-Proxy', "pass-through")
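# Illustrative sketch (added for clarity, not part of the original Chromium
# file): the key/value parsing that GetChromeProxyRequestHeaderValue applies to
# the Chrome-Proxy request header, shown on a made-up header string.
def _demo_parse_chrome_proxy_header(header='c=android, foo=bar', key='c'):
  values = [v.strip() for v in header.split(',')]
  for value in values:
    kvp = value.split('=', 1)
    if len(kvp) == 2 and kvp[0].strip() == key:
      return kvp[1].strip()
  return None  # returns 'android' for key 'c' in the example header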
|
get
|
Get an existing PrivateEndpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateEndpointArgs', 'PrivateEndpoint']
@pulumi.input_type
class PrivateEndpointArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
manual_private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]] = None,
private_endpoint_name: Optional[pulumi.Input[str]] = None,
private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]] = None,
subnet: Optional[pulumi.Input['SubnetArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a PrivateEndpoint resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]] manual_private_link_service_connections: A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
:param pulumi.Input[str] private_endpoint_name: The name of the private endpoint.
:param pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]] private_link_service_connections: A grouping of information about the connection to the remote resource.
:param pulumi.Input['SubnetArgs'] subnet: The ID of the subnet from which the private IP will be allocated.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if manual_private_link_service_connections is not None:
pulumi.set(__self__, "manual_private_link_service_connections", manual_private_link_service_connections)
if private_endpoint_name is not None:
pulumi.set(__self__, "private_endpoint_name", private_endpoint_name)
if private_link_service_connections is not None:
pulumi.set(__self__, "private_link_service_connections", private_link_service_connections)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="manualPrivateLinkServiceConnections")
def manual_private_link_service_connections(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]]:
"""
A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
"""
return pulumi.get(self, "manual_private_link_service_connections")
@manual_private_link_service_connections.setter
def manual_private_link_service_connections(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]]):
pulumi.set(self, "manual_private_link_service_connections", value)
@property
@pulumi.getter(name="privateEndpointName")
def private_endpoint_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint.
"""
return pulumi.get(self, "private_endpoint_name")
@private_endpoint_name.setter
def private_endpoint_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_name", value)
@property
@pulumi.getter(name="privateLinkServiceConnections")
def private_link_service_connections(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]]:
"""
A grouping of information about the connection to the remote resource.
"""
return pulumi.get(self, "private_link_service_connections")
@private_link_service_connections.setter
def private_link_service_connections(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]]):
pulumi.set(self, "private_link_service_connections", value)
@property
@pulumi.getter
def subnet(self) -> Optional[pulumi.Input['SubnetArgs']]:
"""
The ID of the subnet from which the private IP will be allocated.
"""
return pulumi.get(self, "subnet")
@subnet.setter
def subnet(self, value: Optional[pulumi.Input['SubnetArgs']]):
pulumi.set(self, "subnet", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class PrivateEndpoint(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
manual_private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]]] = None,
private_endpoint_name: Optional[pulumi.Input[str]] = None,
private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subnet: Optional[pulumi.Input[pulumi.InputType['SubnetArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Private endpoint resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]] manual_private_link_service_connections: A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
:param pulumi.Input[str] private_endpoint_name: The name of the private endpoint.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]] private_link_service_connections: A grouping of information about the connection to the remote resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['SubnetArgs']] subnet: The ID of the subnet from which the private IP will be allocated.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Private endpoint resource.
:param str resource_name: The name of the resource.
:param PrivateEndpointArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
manual_private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]]] = None,
private_endpoint_name: Optional[pulumi.Input[str]] = None,
private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subnet: Optional[pulumi.Input[pulumi.InputType['SubnetArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointArgs.__new__(PrivateEndpointArgs)
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
__props__.__dict__["manual_private_link_service_connections"] = manual_private_link_service_connections
__props__.__dict__["private_endpoint_name"] = private_endpoint_name
__props__.__dict__["private_link_service_connections"] = private_link_service_connections
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["subnet"] = subnet
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_interfaces"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20190901:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20180801:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20180801:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20181001:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20181001:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20181101:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20181101:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20181201:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20181201:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190201:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190201:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190401:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190401:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190601:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190601:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190701:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190701:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190801:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190801:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20191101:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20191101:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20191201:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20191201:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200301:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200301:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200401:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200401:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200501:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200501:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200601:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200601:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200701:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200701:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200801:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200801:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20201101:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20201101:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20210201:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20210201:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20210301:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20210301:PrivateEndpoint")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpoint, __self__).__init__(
'azure-native:network/v20190901:PrivateEndpoint',
resource_name,
__props__,
opts)
# MASKED: get function (lines 246-272)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="manualPrivateLinkServiceConnections")
def manual_private_link_service_connections(self) -> pulumi.Output[Optional[Sequence['outputs.PrivateLinkServiceConnectionResponse']]]:
"""
A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
"""
return pulumi.get(self, "manual_private_link_service_connections")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> pulumi.Output[Sequence['outputs.NetworkInterfaceResponse']]:
"""
An array of references to the network interfaces created for this private endpoint.
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter(name="privateLinkServiceConnections")
def private_link_service_connections(self) -> pulumi.Output[Optional[Sequence['outputs.PrivateLinkServiceConnectionResponse']]]:
"""
A grouping of information about the connection to the remote resource.
"""
return pulumi.get(self, "private_link_service_connections")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the private endpoint resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def subnet(self) -> pulumi.Output[Optional['outputs.SubnetResponse']]:
"""
The ID of the subnet from which the private IP will be allocated.
"""
return pulumi.get(self, "subnet")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
|
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpoint':
"""
Get an existing PrivateEndpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointArgs.__new__(PrivateEndpointArgs)
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["manual_private_link_service_connections"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_interfaces"] = None
__props__.__dict__["private_link_service_connections"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["subnet"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return PrivateEndpoint(resource_name, opts=opts, __props__=__props__)
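# Illustrative usage sketch (added for clarity; not part of the generated SDK).
# Inside a running Pulumi program, an existing private endpoint can be adopted
# by its Azure resource ID; the ID below is a placeholder, not a real resource.
def _demo_lookup_private_endpoint():
    placeholder_id = ("/subscriptions/00000000-0000-0000-0000-000000000000"
                      "/resourceGroups/example-rg/providers/Microsoft.Network"
                      "/privateEndpoints/example-pe")
    return PrivateEndpoint.get("example-pe", id=placeholder_id)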
| 246 | 272 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateEndpointArgs', 'PrivateEndpoint']
@pulumi.input_type
class PrivateEndpointArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
manual_private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]] = None,
private_endpoint_name: Optional[pulumi.Input[str]] = None,
private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]] = None,
subnet: Optional[pulumi.Input['SubnetArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a PrivateEndpoint resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]] manual_private_link_service_connections: A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
:param pulumi.Input[str] private_endpoint_name: The name of the private endpoint.
:param pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]] private_link_service_connections: A grouping of information about the connection to the remote resource.
:param pulumi.Input['SubnetArgs'] subnet: The ID of the subnet from which the private IP will be allocated.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if manual_private_link_service_connections is not None:
pulumi.set(__self__, "manual_private_link_service_connections", manual_private_link_service_connections)
if private_endpoint_name is not None:
pulumi.set(__self__, "private_endpoint_name", private_endpoint_name)
if private_link_service_connections is not None:
pulumi.set(__self__, "private_link_service_connections", private_link_service_connections)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="manualPrivateLinkServiceConnections")
def manual_private_link_service_connections(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]]:
"""
A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
"""
return pulumi.get(self, "manual_private_link_service_connections")
@manual_private_link_service_connections.setter
def manual_private_link_service_connections(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]]):
pulumi.set(self, "manual_private_link_service_connections", value)
@property
@pulumi.getter(name="privateEndpointName")
def private_endpoint_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint.
"""
return pulumi.get(self, "private_endpoint_name")
@private_endpoint_name.setter
def private_endpoint_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_name", value)
@property
@pulumi.getter(name="privateLinkServiceConnections")
def private_link_service_connections(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]]:
"""
A grouping of information about the connection to the remote resource.
"""
return pulumi.get(self, "private_link_service_connections")
@private_link_service_connections.setter
def private_link_service_connections(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]]):
pulumi.set(self, "private_link_service_connections", value)
@property
@pulumi.getter
def subnet(self) -> Optional[pulumi.Input['SubnetArgs']]:
"""
The ID of the subnet from which the private IP will be allocated.
"""
return pulumi.get(self, "subnet")
@subnet.setter
def subnet(self, value: Optional[pulumi.Input['SubnetArgs']]):
pulumi.set(self, "subnet", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class PrivateEndpoint(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
manual_private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]]] = None,
private_endpoint_name: Optional[pulumi.Input[str]] = None,
private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subnet: Optional[pulumi.Input[pulumi.InputType['SubnetArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Private endpoint resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]] manual_private_link_service_connections: A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
:param pulumi.Input[str] private_endpoint_name: The name of the private endpoint.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]] private_link_service_connections: A grouping of information about the connection to the remote resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['SubnetArgs']] subnet: The ID of the subnet from which the private IP will be allocated.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Private endpoint resource.
:param str resource_name: The name of the resource.
:param PrivateEndpointArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
manual_private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]]] = None,
private_endpoint_name: Optional[pulumi.Input[str]] = None,
private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subnet: Optional[pulumi.Input[pulumi.InputType['SubnetArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointArgs.__new__(PrivateEndpointArgs)
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
__props__.__dict__["manual_private_link_service_connections"] = manual_private_link_service_connections
__props__.__dict__["private_endpoint_name"] = private_endpoint_name
__props__.__dict__["private_link_service_connections"] = private_link_service_connections
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["subnet"] = subnet
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_interfaces"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20190901:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20180801:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20180801:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20181001:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20181001:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20181101:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20181101:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20181201:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20181201:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190201:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190201:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190401:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190401:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190601:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190601:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190701:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190701:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190801:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190801:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20191101:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20191101:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20191201:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20191201:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200301:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200301:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200401:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200401:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200501:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200501:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200601:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200601:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200701:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200701:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200801:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200801:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20201101:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20201101:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20210201:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20210201:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20210301:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20210301:PrivateEndpoint")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpoint, __self__).__init__(
'azure-native:network/v20190901:PrivateEndpoint',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpoint':
"""
Get an existing PrivateEndpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointArgs.__new__(PrivateEndpointArgs)
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["manual_private_link_service_connections"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_interfaces"] = None
__props__.__dict__["private_link_service_connections"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["subnet"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return PrivateEndpoint(resource_name, opts=opts, __props__=__props__)
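    # Usage sketch (not generated code; the ARM ID below is hypothetical): an existing
    # endpoint can be adopted into Pulumi state by its provider ID, e.g.
    #   existing = PrivateEndpoint.get(
    #       "importedPrivateEndpoint",
    #       id="/subscriptions/<subscription-id>/resourceGroups/example-rg/providers/Microsoft.Network/privateEndpoints/example-pe")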
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="manualPrivateLinkServiceConnections")
def manual_private_link_service_connections(self) -> pulumi.Output[Optional[Sequence['outputs.PrivateLinkServiceConnectionResponse']]]:
"""
A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
"""
return pulumi.get(self, "manual_private_link_service_connections")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> pulumi.Output[Sequence['outputs.NetworkInterfaceResponse']]:
"""
An array of references to the network interfaces created for this private endpoint.
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter(name="privateLinkServiceConnections")
def private_link_service_connections(self) -> pulumi.Output[Optional[Sequence['outputs.PrivateLinkServiceConnectionResponse']]]:
"""
A grouping of information about the connection to the remote resource.
"""
return pulumi.get(self, "private_link_service_connections")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the private endpoint resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def subnet(self) -> pulumi.Output[Optional['outputs.SubnetResponse']]:
"""
The ID of the subnet from which the private IP will be allocated.
"""
return pulumi.get(self, "subnet")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
|