response
stringlengths
1
33.1k
instruction
stringlengths
22
582k
str->dict Information for CKPlayer API content.
def ckplayer_get_info_by_xml(ckinfo):
    """str->dict
    Information for CKPlayer API content."""
    root = ET.XML(ckinfo)
    video_dict = {
        'title': '',
        #'duration': 0,
        'links': [],
        'size': 0,
        'flashvars': '',
    }
    tree = dictify(root)['ckplayer']

    if 'info' in tree:
        if '_text' in tree['info'][0]['title'][0]:  # title present
            video_dict['title'] = tree['info'][0]['title'][0]['_text'].strip()

    if '_text' in tree['video'][0]['size'][0]:  # size exists for 1 piece
        video_dict['size'] = sum(int(piece['size'][0]['_text']) for piece in tree['video'])

    if '_text' in tree['video'][0]['file'][0]:  # link exist
        video_dict['links'] = [piece['file'][0]['_text'].strip() for piece in tree['video']]

    if '_text' in tree['flashvars'][0]:
        video_dict['flashvars'] = tree['flashvars'][0]['_text'].strip()

    return video_dict
Downloads Dailymotion videos by URL.
def dailymotion_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Downloads Dailymotion videos by URL."""
    html = get_content(rebuilt_url(url))
    info = json.loads(match1(html, r'qualities":({.+?}),"'))
    title = match1(html, r'"video_title"\s*:\s*"([^"]+)"') or \
        match1(html, r'"title"\s*:\s*"([^"]+)"')
    title = unicodize(title)

    # Probe qualities best-first; 'auto' is the last resort.
    for quality in ['1080', '720', '480', '380', '240', '144', 'auto']:
        try:
            real_url = info[quality][1]["url"]
            if real_url:
                break
        except KeyError:
            pass

    mime, ext, size = url_info(real_url)

    print_info(site_info, title, mime, size)
    if not info_only:
        download_urls([real_url], title, ext, size,
                      output_dir=output_dir, merge=merge)
From http://cdn37.atwikiimg.com/sitescript/pub/dksitescript/FC2.site.js Also com.hps.util.fc2.FC2EncrptUtil.makeMimiLocal L110
def makeMimi(upid):
    """From http://cdn37.atwikiimg.com/sitescript/pub/dksitescript/FC2.site.js
    Also com.hps.util.fc2.FC2EncrptUtil.makeMimiLocal L110

    Returns the hex MD5 of '<upid>_<fixed seed>'.
    """
    seed = "gGddgPfeaf_gzyr"
    return md5("{}_{}".format(upid, seed).encode('utf-8')).hexdigest()
wrapper
def fc2video_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
    """wrapper"""
    # Example URLs:
    #   http://video.fc2.com/en/content/20151021bTVKnbEw
    #   http://xiaojiadianvideo.asia/content/20151021bTVKnbEw
    #   http://video.fc2.com/ja/content/20151021bTVKnbEw
    #   http://video.fc2.com/tw/content/20151021bTVKnbEw
    hostname = urlparse(url).hostname
    supported = 'fc2.com' in hostname or 'xiaojiadianvideo.asia' in hostname
    if not supported:
        return False
    upid = match1(url, r'.+/content/(\w+)')

    fc2video_download_by_upid(upid, output_dir, merge, info_only)
Source: Android mobile
def miaopai_download_by_fid(fid, output_dir = '.', merge = False, info_only = False, **kwargs):
    '''Source: Android mobile'''
    page_url = 'https://video.weibo.com/show?fid=' + fid + '&type=mp4'

    mobile_page = get_content(page_url, headers=fake_headers_mobile)
    url = match1(mobile_page, r'<video id=.*?src=[\'"](.*?)[\'"]\W')
    if url is None:
        # No inline <video> source; fall back to the wb_mp.js player route.
        wb_mp = re.search(r'<script src=([\'"])(.+?wb_mp\.js)\1>', mobile_page).group(2)
        return miaopai_download_by_wbmp(wb_mp, fid, output_dir=output_dir, merge=merge,
                                        info_only=info_only, total_size=None, **kwargs)

    title = match1(mobile_page, r'<title>((.|\n)+?)</title>')
    if not title:
        title = fid
    title = title.replace('\n', '_')

    ext, size = 'mp4', url_info(url)[2]
    print_info(site_info, title, ext, size)
    if not info_only:
        download_urls([url], title, ext, total_size=None,
                      output_dir=output_dir, merge=merge)
str->list Convert XML to URL List. From Biligrab.
def sina_xml_to_url_list(xml_data):
    """str->list
    Convert XML to URL List.
    From Biligrab.
    """
    dom = parseString(xml_data)
    # Each <durl> element carries exactly one <url> child whose text node
    # is the download location.
    return [
        durl.getElementsByTagName('url')[0].childNodes[0].data
        for durl in dom.getElementsByTagName('durl')
    ]
str->str
def showroom_get_roomid_by_room_url_key(room_url_key):
    """str->str

    Resolve a room URL key to its numeric room_id by scraping the
    mobile page."""
    fake_headers_mobile = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Charset': 'UTF-8,*;q=0.5',
        'Accept-Encoding': 'gzip,deflate,sdch',
        'Accept-Language': 'en-US,en;q=0.8',
        'User-Agent': 'Mozilla/5.0 (Linux; Android 4.4.2; Nexus 4 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.114 Mobile Safari/537.36'
    }
    webpage_url = 'https://www.showroom-live.com/' + room_url_key
    html = get_content(webpage_url, headers = fake_headers_mobile)
    roomid = match1(html, r'room\?room_id\=(\d+)')
    assert roomid
    return roomid
Source: Android mobile
def showroom_download_by_room_id(room_id, output_dir = '.', merge = False, info_only = False, **kwargs):
    '''Source: Android mobile

    Polls the streaming_url API until the room is live, then records the
    default HLS stream via FFmpeg.
    '''
    while True:
        timestamp = str(int(time() * 1000))
        api_endpoint = 'https://www.showroom-live.com/api/live/streaming_url?room_id={room_id}&_={timestamp}'.format(room_id = room_id, timestamp = timestamp)
        html = get_content(api_endpoint)
        html = json.loads(html)
        # Sample response (trimmed): {'streaming_url_list': [
        #   {'url': 'rtmp://.../liveedge', 'id': 1, 'is_default': True, 'type': 'rtmp', 'stream_name': '...'},
        #   {'url': 'http://.../playlist.m3u8', 'is_default': True, 'id': 2, 'type': 'hls', 'label': 'original spec'},
        #   ...]}
        # An empty dict means the room is offline; keep polling.
        if len(html) >= 1:
            break
        log.w('The live show is currently offline.')
        sleep(1)

    # This is mainly for testing the M3U FFmpeg parser so I would ignore
    # any non-m3u ones
    stream_url = [i['url'] for i in html['streaming_url_list'] if i['is_default'] and i['type'] == 'hls'][0]
    assert stream_url

    # title
    title = ''
    profile_api = 'https://www.showroom-live.com/api/room/profile?room_id={room_id}'.format(room_id = room_id)
    html = loads(get_content(profile_api))
    try:
        title = html['main_name']
    except KeyError:
        title = 'Showroom_{room_id}'.format(room_id = room_id)

    type_, ext, size = url_info(stream_url)
    print_info(site_info, title, type_, size)
    if not info_only:
        # Live HLS has no known size; hand the stream straight to FFmpeg.
        download_url_ffmpeg(url=stream_url, title=title, ext= 'mp4', output_dir=output_dir)
Downloads a Sina video by its unique vid. http://video.sina.com.cn/
def sina_download_by_vid(vid, title=None, output_dir='.', merge=True, info_only=False):
    """Downloads a Sina video by its unique vid.
    http://video.sina.com.cn/
    """
    xml = api_req(vid)
    urls, name, size = video_info(xml)
    if urls is None:
        # video_info signals failure by returning None urls and an error
        # message in `name`.
        log.wtf(name)
    title = name
    print_info(site_info, title, 'flv', size)
    if not info_only:
        download_urls(urls, title, 'flv', size,
                      output_dir=output_dir, merge=merge)
Downloads a Sina video by its unique vkey. http://video.sina.com/
def sina_download_by_vkey(vkey, title=None, output_dir='.', merge=True, info_only=False):
    """Downloads a Sina video by its unique vkey.
    http://video.sina.com/
    """
    url = 'http://video.sina.com/v/flvideo/%s_0.flv' % vkey
    # Only the size is used; the container is always flv here.
    _, _, size = url_info(url)

    print_info(site_info, title, 'flv', size)
    if not info_only:
        download_urls([url], title, 'flv', size,
                      output_dir=output_dir, merge=merge)
Downloads Sina videos by URL.
def sina_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
    """Downloads Sina videos by URL.

    Resolution order: zxt pages get their own handler; otherwise try a
    vid in the URL, then hd_vid / vid scraped from the page, then a vkey,
    and finally a trailing '#<digits>' fragment.
    """
    if 'news.sina.com.cn/zxt' in url:
        sina_zxt(url, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
        return

    vid = match1(url, r'vid=(\d+)')
    if vid is None:
        video_page = get_content(url)
        vid = hd_vid = match1(video_page, r'hd_vid\s*:\s*\'([^\']+)\'')
        if hd_vid == '0':
            # hd_vid == '0' means no HD copy; pick the last of the plain vids.
            vids = match1(video_page, r'[^\w]vid\s*:\s*\'([^\']+)\'').split('|')
            vid = vids[-1]

    if vid is None:
        vid = match1(video_page, r'vid:"?(\d+)"?')
    if vid:
        #title = match1(video_page, r'title\s*:\s*\'([^\']+)\'')
        sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
    else:
        vkey = match1(video_page, r'vkey\s*:\s*"([^"]+)"')
        if vkey is None:
            # Last resort: a numeric id in the URL fragment.
            vid = match1(url, r'#(\d+)')
            sina_download_by_vid(vid, output_dir=output_dir, merge=merge, info_only=info_only)
            return
        title = match1(video_page, r'title\s*:\s*"([^"]+)"')
        sina_download_by_vkey(vkey, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
http://stackoverflow.com/a/30923963/2946714
def dictify(r, root=True):
    """Recursively convert an ElementTree element into nested dicts.

    Attributes become keys, text becomes '_text', and children are
    grouped into lists keyed by tag.
    http://stackoverflow.com/a/30923963/2946714
    """
    if root:
        return {r.tag: dictify(r, False)}
    result = copy(r.attrib)
    if r.text:
        result["_text"] = r.text
    for child in r.findall("./*"):
        result.setdefault(child.tag, []).append(dictify(child, False))
    return result
video page
def ucas_download_single(url, output_dir = '.', merge = False, info_only = False, **kwargs):
    '''video page'''
    html = get_content(url)
    # resourceID is a UUID embedded in the page markup.
    resourceID = re.findall(
        r'resourceID":"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})',
        html)[0]
    assert resourceID != '', 'Cannot find resourceID!'

    title = match1(html, r'<div class="bc-h">(.+)</div>')
    url_lists = _ucas_get_url_lists_by_resourceID(resourceID)
    assert url_lists, 'Cannot find any URL of such class!'

    # Each part gets an indexed title; sizes are unknown up front.
    for k, part in enumerate(url_lists):
        part_title = title + '_' + str(k)
        print_info(site_info, part_title, 'flv', 0)
        if not info_only:
            download_urls(part, part_title, 'flv', total_size=None,
                          output_dir=output_dir, merge=merge)
course page
def ucas_download_playlist(url, output_dir = '.', merge = False, info_only = False, **kwargs):
    '''course page'''
    html = get_content(url)
    parts = re.findall(r'(getplaytitle.do\?.+)"', html)
    assert parts, 'No part found!'

    for part_path in parts:
        part_url = 'http://v.ucas.ac.cn/course/' + part_path
        ucas_download(part_url, output_dir=output_dir,
                      merge=merge, info_only=info_only)
Get item_id
def veoh_download(url, output_dir = '.', merge = False, info_only = False, **kwargs):
    '''Get item_id from a Veoh watch URL and delegate the download.

    Raises NotImplementedError for unrecognized URL shapes.
    '''
    if re.match(r'http://www.veoh.com/watch/\w+', url):
        item_id = match1(url, r'http://www.veoh.com/watch/(\w+)')
    elif re.match(r'http://www.veoh.com/m/watch.php\?v=\.*', url):
        item_id = match1(url, r'http://www.veoh.com/m/watch.php\?v=(\w+)')
    else:
        raise NotImplementedError('Cannot find item ID')
    # BUGFIX: previously forwarded literal '.' and False, silently
    # discarding the caller's output_dir and merge arguments.
    veoh_download_by_id(item_id, output_dir=output_dir, merge=merge,
                        info_only=info_only, **kwargs)
Source: Android mobile
def veoh_download_by_id(item_id, output_dir = '.', merge = False, info_only = False, **kwargs):
    """Source: Android mobile"""
    webpage_url = 'http://www.veoh.com/m/watch.php?v={item_id}&quality=1'.format(item_id = item_id)

    # The mobile page carries both the media URL and the og:title.
    page = get_content(webpage_url, decoded=True)
    url = match1(page, r'<source src="(.*?)\"\W')
    title = match1(page, r'<meta property="og:title" content="([^"]*)"')

    type_, ext, size = url_info(url)
    print_info(site_info, title, type_, size)
    if not info_only:
        download_urls([url], title, ext, total_size=None,
                      output_dir=output_dir, merge=merge)
str->None
def vimeo_download_by_channel(url, output_dir='.', merge=False, info_only=False, **kwargs):
    """str->None

    Extract the channel id from a Vimeo channel URL and download all of
    its videos. e.g. https://vimeo.com/channels/464686
    """
    # BUGFIX: the pattern was anchored on 'http://' only, so https URLs
    # (as in the example above) never matched and channel_id was None.
    channel_id = match1(url, r'https?://vimeo.com/channels/(\w+)')
    vimeo_download_by_channel_id(channel_id, output_dir, merge, info_only, **kwargs)
str/int->None
def vimeo_download_by_channel_id(channel_id, output_dir='.', merge=False, info_only=False, **kwargs):
    """str/int->None

    Download every video in a Vimeo channel via the API; failures on
    individual videos are logged and skipped."""
    api_url = 'https://api.vimeo.com/channels/{channel_id}/videos?access_token={access_token}'.format(
        channel_id=channel_id, access_token=access_token)
    data = loads(get_content(api_url))
    #print(data)
    id_list = [match1(item['uri'], r'/videos/(\w+)') for item in data['data']]
    for video_id in id_list:
        try:
            vimeo_download_by_id(video_id, None, output_dir, merge, info_only, **kwargs)
        except urllib.error.URLError as err:
            log.w('{} failed with {}'.format(video_id, err))
try: # normal Vimeo video html = get_content('https://vimeo.com/' + id) cfg_patt = r'clip_page_config\s*=\s*(\{.+?\});' cfg = json.loads(match1(html, cfg_patt)) video_page = get_content(cfg['player']['config_url'], headers=fake_headers) title = cfg['clip']['title'] info = loads(video_page) except: # embedded player - referer may be required if 'referer' in kwargs: fake_headers['Referer'] = kwargs['referer'] video_page = get_content('http://player.vimeo.com/video/%s' % id, headers=fake_headers) title = r1(r'<title>([^<]+)</title>', video_page) info = loads(match1(video_page, r'var t=(\{.+?\});')) streams = info['request']['files']['progressive'] streams = sorted(streams, key=lambda i: i['height']) url = streams[-1]['url'] type, ext, size = url_info(url, faker=True) print_info(site_info, title, type, size) if not info_only: download_urls([url], title, ext, size, output_dir, merge=merge, faker=True)
def vimeo_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False, **kwargs):
    """Download a single Vimeo video through VimeoExtractor.

    The pre-extractor implementation (scraping clip_page_config and
    player.vimeo.com directly) was retired; this now just delegates.
    """
    site = VimeoExtractor()
    site.download_by_vid(id, info_only=info_only, output_dir=output_dir,
                         merge=merge, **kwargs)
int->JSON Return a parsed JSON tree of WanMen's API.
def _wanmen_get_json_api_content_by_courseID(courseID):
    """int->JSON Return a parsed JSON tree of WanMen's API."""
    api_url = 'http://api.wanmen.org/course/getCourseNested/{courseID}'.format(
        courseID=courseID)
    return loads(get_content(api_url))
JSON, int, int, int->str Get a proper title with courseid+topicID+partID.
def _wanmen_get_title_by_json_topic_part(json_content, tIndex, pIndex): """JSON, int, int, int->str Get a proper title with courseid+topicID+partID.""" return '_'.join([json_content[0]['name'], json_content[0]['Topics'][tIndex]['name'], json_content[0]['Topics'][tIndex]['Parts'][pIndex]['name']])
JSON, int, int, int->str Get one BokeCC video ID with courseid+topicID+partID.
def _wanmen_get_boke_id_by_json_topic_part(json_content, tIndex, pIndex): """JSON, int, int, int->str Get one BokeCC video ID with courseid+topicID+partID.""" return json_content[0]['Topics'][tIndex]['Parts'][pIndex]['ccVideoLink']
int->None Download a WHOLE course. Reuse the API call to save time.
def wanmen_download_by_course(json_api_content, output_dir='.', merge=True, info_only=False, **kwargs):
    """int->None Download a WHOLE course.

    Reuse the API call to save time."""
    topics = json_api_content[0]['Topics']
    for tIndex in range(len(topics)):
        for pIndex in range(len(topics[tIndex]['Parts'])):
            wanmen_download_by_course_topic_part(json_api_content,
                                                 tIndex,
                                                 pIndex,
                                                 output_dir=output_dir,
                                                 merge=merge,
                                                 info_only=info_only,
                                                 **kwargs)
int, int->None Download a TOPIC of a course. Reuse the API call to save time.
def wanmen_download_by_course_topic(json_api_content, tIndex, output_dir='.', merge=True, info_only=False, **kwargs):
    """int, int->None Download a TOPIC of a course.

    Reuse the API call to save time."""
    parts = json_api_content[0]['Topics'][tIndex]['Parts']
    for pIndex in range(len(parts)):
        wanmen_download_by_course_topic_part(json_api_content,
                                             tIndex,
                                             pIndex,
                                             output_dir=output_dir,
                                             merge=merge,
                                             info_only=info_only,
                                             **kwargs)
int, int, int->None Download ONE PART of the course.
def wanmen_download_by_course_topic_part(json_api_content, tIndex, pIndex, output_dir='.', merge=True, info_only=False, **kwargs):
    """int, int, int->None Download ONE PART of the course."""
    html = json_api_content
    title = _wanmen_get_title_by_json_topic_part(html, tIndex, pIndex)
    bokeccID = _wanmen_get_boke_id_by_json_topic_part(html, tIndex, pIndex)

    # The actual media lives on BokeCC; hand off with the composed title.
    bokecc_download_by_id(vid=bokeccID, title=title, output_dir=output_dir,
                          merge=merge, info_only=info_only, **kwargs)
wrapper
def yixia_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
    """wrapper

    Dispatch a Yixia-family URL (Miaopai / Xiaokaxiu) to the right
    scid-based downloader.
    """
    hostname = urlparse(url).hostname
    if 'n.miaopai.com' == hostname:
        smid = match1(url, r'n\.miaopai\.com/media/([^.]+)')
        miaopai_download_by_smid(smid, output_dir, merge, info_only)
        return
    elif 'miaopai.com' in hostname:  #Miaopai
        yixia_download_by_scid = yixia_miaopai_download_by_scid
        site_info = "Yixia Miaopai"

        scid = match1(url, r'miaopai\.com/show/channel/([^.]+)\.htm') or \
               match1(url, r'miaopai\.com/show/([^.]+)\.htm') or \
               match1(url, r'm\.miaopai\.com/show/channel/([^.]+)\.htm') or \
               match1(url, r'm\.miaopai\.com/show/channel/([^.]+)')
    elif 'xiaokaxiu.com' in hostname:  #Xiaokaxiu
        yixia_download_by_scid = yixia_xiaokaxiu_download_by_scid
        site_info = "Yixia Xiaokaxiu"

        if re.match(r'http://v.xiaokaxiu.com/v/.+\.html', url):  #PC
            scid = match1(url, r'http://v.xiaokaxiu.com/v/(.+)\.html')
        elif re.match(r'http://m.xiaokaxiu.com/m/.+\.html', url):  #Mobile
            scid = match1(url, r'http://m.xiaokaxiu.com/m/(.+)\.html')
    else:
        # BUGFIX: previously fell through with `pass` and then crashed
        # below with UnboundLocalError on yixia_download_by_scid/scid.
        # Fail with a clear, catchable error instead.
        raise NotImplementedError('Unsupported URL: ' + url)

    yixia_download_by_scid(scid, output_dir, merge, info_only)
str, str->True WARNING: NOT THE SAME PARMS AS OTHER FUNCTIONS!!!!!! You can basically download anything with this function but better leave it alone with
def ffmpeg_download_stream(files, title, ext, params={}, output_dir='.', stream=True):
    """str, str->True
    WARNING: NOT THE SAME PARAMS AS OTHER FUNCTIONS!!!!!!
    You can basically download anything with this function
    but better leave it alone with

    `files` is a single input URL/path despite the plural name; `params`
    maps extra ffmpeg option names to values. Returns True.
    """
    output = title + '.' + ext

    if not (output_dir == '.'):
        output = output_dir + '/' + output

    print('Downloading streaming content with FFmpeg, press q to stop recording...')
    if stream:
        # -re: read input at native frame rate (live streams).
        ffmpeg_params = [FFMPEG] + ['-y', '-re', '-i']
    else:
        ffmpeg_params = [FFMPEG] + ['-y', '-i']
    ffmpeg_params.append(files)  #not the same here!!!!

    if FFMPEG == 'avconv':  #who cares?
        ffmpeg_params += ['-c', 'copy']
    else:
        ffmpeg_params += ['-c', 'copy', '-bsf:a', 'aac_adtstoasc']

    if params is not None:
        if len(params) > 0:
            # BUGFIX: `for k, v in params` iterated dict KEYS and raised
            # ValueError for any non-empty params dict; iterate pairs.
            for k, v in params.items():
                ffmpeg_params.append(k)
                ffmpeg_params.append(v)

    ffmpeg_params.extend(['--', output])

    print(' '.join(ffmpeg_params))

    try:
        a = subprocess.Popen(ffmpeg_params, stdin=subprocess.PIPE)
        a.communicate()
    except KeyboardInterrupt:
        # Ask ffmpeg to finalize the file cleanly instead of killing it.
        try:
            a.stdin.write('q'.encode('utf-8'))
        except:
            pass

    return True
Converts a string to a valid filename.
def legitimize(text, os=detect_os()):
    """Converts a string to a valid filename."""
    # POSIX systems: strip NUL and path/pipe separators.
    text = text.translate({
        0: None,
        ord('/'): '-',
        ord('|'): '-',
    })

    # FIXME: do some filesystem detection
    if os in ('windows', 'cygwin', 'wsl'):
        # Windows (non-POSIX namespace)
        text = text.translate({
            # Reserved in Windows VFAT and NTFS
            ord(':'): '-',
            ord('*'): '-',
            ord('?'): '-',
            ord('\\'): '-',
            ord('\"'): '\'',
            # Reserved in Windows VFAT
            ord('+'): '-',
            ord('<'): '-',
            ord('>'): '-',
            ord('['): '(',
            ord(']'): ')',
            ord('\t'): ' ',
        })
    else:
        # *nix
        if os == 'mac':
            # Mac OS HFS+
            text = text.translate({
                ord(':'): '-',
            })

        # Remove leading .
        if text.startswith("."):
            text = text[1:]

    text = text[:80]  # Trim to 80 Unicode characters long
    return text
Get (branch, commit) from HEAD of a git repo.
def get_head(repo_path):
    """Get (branch, commit) from HEAD of a git repo.

    Reads .git/HEAD for the ref path and the ref file for the commit
    hash (short, 7 chars). Returns None if repo_path is not a usable
    git checkout (missing files, unexpected HEAD format).
    """
    try:
        # `with` blocks fix the original's leaked file handles.
        with open(os.path.join(repo_path, '.git', 'HEAD'), 'r') as head_file:
            # 'ref: refs/heads/<branch>' -> ['refs', 'heads', '<branch>']
            ref = head_file.read().strip()[5:].split('/')
        branch = ref[-1]
        with open(os.path.join(repo_path, '.git', *ref), 'r') as ref_file:
            commit = ref_file.read().strip()[:7]
        return branch, commit
    except Exception:
        # Best-effort helper: any failure means "no version info".
        return None
Format text with color or other effects into ANSI escaped string.
def sprint(text, *colors):
    """Format text with color or other effects into ANSI escaped string."""
    if not (IS_ANSI_TERMINAL and colors):
        return text
    codes = ";".join(str(color) for color in colors)
    return "\33[{}m{content}\33[{}m".format(codes, RESET, content=text)
Print text to standard output.
def println(text, *colors):
    """Print text to standard output."""
    line = sprint(text, *colors) + "\n"
    sys.stdout.write(line)
Print text to standard error.
def print_err(text, *colors):
    """Print text to standard error."""
    line = sprint(text, *colors) + "\n"
    sys.stderr.write(line)
Print a log message to standard error.
def print_log(text, *colors):
    """Print a log message to standard error."""
    message = "{}: {}".format(script_name, text)
    sys.stderr.write(sprint(message, *colors) + "\n")
Print a normal log message.
def i(message):
    """Print a normal log message."""
    print_log(message)
Print a debug log message.
def d(message):
    """Print a debug log message."""
    print_log(message, BLUE)
Print a warning log message.
def w(message):
    """Print a warning log message."""
    print_log(message, YELLOW)
Print an error log message.
def e(message, exit_code=None):
    """Print an error log message.

    Exits with exit_code when one is given.
    """
    # NOTE(review): error uses YELLOW+BOLD while wtf() uses RED — confirm
    # this color choice is intentional.
    print_log(message, YELLOW, BOLD)
    if exit_code is not None:
        sys.exit(exit_code)
What a Terrible Failure!
def wtf(message, exit_code=1):
    """What a Terrible Failure!

    Logs in red/bold and exits (default exit code 1); pass
    exit_code=None to log without exiting.
    """
    print_log(message, RED, BOLD)
    if exit_code is not None:
        sys.exit(exit_code)
Detect operating system.
def detect_os():
    """Detect operating system.

    Returns one of: 'cygwin', 'mac', 'linux', 'wsl', 'windows', 'bsd',
    or 'unknown'.
    """
    # Inspired by:
    # https://github.com/scivision/pybashutils/blob/78b7f2b339cb03b1c37df94015098bbe462f8526/pybashutils/windows_linux_detect.py
    syst = system().lower()
    os = 'unknown'

    if 'cygwin' in syst:
        os = 'cygwin'
    elif 'darwin' in syst:
        os = 'mac'
    elif 'linux' in syst:
        os = 'linux'
        # Detect WSL: https://github.com/Microsoft/BashOnWindows/issues/423
        try:
            with open('/proc/version', 'r') as f:
                if 'microsoft' in f.read().lower():
                    os = 'wsl'
        except:
            pass
    elif 'windows' in syst:
        os = 'windows'
    elif 'bsd' in syst:
        os = 'bsd'

    return os
Get (width, height) of the current terminal.
def get_terminal_size():
    """Get the terminal dimensions as a 2-tuple.

    NOTE(review): TIOCGWINSZ is unpacked as (rows, cols) and the fallback
    (40, 80) matches that order; the historical docstring said
    (width, height) — confirm which order callers rely on.
    """
    try:
        import fcntl, termios, struct  # fcntl module only available on Unix
        return struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234'))
    except:
        return (40, 80)
Searches the provided modules for the named class and returns it.
def find_class_by_name(name, modules):
    """Searches the provided modules for the named class and returns it.

    Raises StopIteration if no module defines (a truthy) `name`.
    """
    candidates = (getattr(module, name, None) for module in modules)
    return next(c for c in candidates if c)
Creates the section of the graph which reads the evaluation data. Args: reader: A class which parses the training data. data_pattern: A 'glob' style path to the data files. batch_size: How many examples to process at a time. num_readers: How many I/O threads to use. Returns: A tuple containing the features tensor, labels tensor, and optionally a tensor containing the number of frames per video. The exact dimensions depend on the reader being used. Raises: IOError: If no files matching the given pattern were found.
def get_input_evaluation_tensors(reader, data_pattern, batch_size=1024, num_readers=1):
  """Creates the section of the graph which reads the evaluation data.

  Args:
    reader: A class which parses the training data.
    data_pattern: A 'glob' style path to the data files.
    batch_size: How many examples to process at a time.
    num_readers: How many I/O threads to use.

  Returns:
    A tuple containing the features tensor, labels tensor, and optionally a
    tensor containing the number of frames per video. The exact dimensions
    depend on the reader being used.

  Raises:
    IOError: If no files matching the given pattern were found.
  """
  logging.info("Using batch size of %d for evaluation.", batch_size)
  with tf.name_scope("eval_input"):
    files = tf.io.gfile.glob(data_pattern)
    if not files:
      raise IOError("Unable to find the evaluation files.")
    logging.info("number of evaluation files: %d", len(files))
    # num_epochs=1, shuffle=False: each file is read exactly once, in order.
    filename_queue = tf.train.string_input_producer(files, shuffle=False, num_epochs=1)
    eval_data = [
        reader.prepare_reader(filename_queue) for _ in range(num_readers)
    ]
    # allow_smaller_final_batch so the tail of the dataset is not dropped.
    return tf.train.batch_join(eval_data,
                               batch_size=batch_size,
                               capacity=3 * batch_size,
                               allow_smaller_final_batch=True,
                               enqueue_many=True)
Creates the Tensorflow graph for evaluation. Args: reader: The data file reader. It should inherit from BaseReader. model: The core model (e.g. logistic or neural net). It should inherit from BaseModel. eval_data_pattern: glob path to the evaluation data files. label_loss_fn: What kind of loss to apply to the model. It should inherit from BaseLoss. batch_size: How many examples to process at a time. num_readers: How many threads to use for I/O operations.
def build_graph(reader, model, eval_data_pattern, label_loss_fn, batch_size=1024, num_readers=1):
  """Creates the Tensorflow graph for evaluation.

  Args:
    reader: The data file reader. It should inherit from BaseReader.
    model: The core model (e.g. logistic or neural net). It should inherit
      from BaseModel.
    eval_data_pattern: glob path to the evaluation data files.
    label_loss_fn: What kind of loss to apply to the model. It should inherit
      from BaseLoss.
    batch_size: How many examples to process at a time.
    num_readers: How many threads to use for I/O operations.
  """
  global_step = tf.Variable(0, trainable=False, name="global_step")
  input_data_dict = get_input_evaluation_tensors(reader,
                                                 eval_data_pattern,
                                                 batch_size=batch_size,
                                                 num_readers=num_readers)
  video_id_batch = input_data_dict["video_ids"]
  model_input_raw = input_data_dict["video_matrix"]
  labels_batch = input_data_dict["labels"]
  num_frames = input_data_dict["num_frames"]
  tf.compat.v1.summary.histogram("model_input_raw", model_input_raw)

  # Last axis is the feature axis; it is the one normalized below.
  feature_dim = len(model_input_raw.get_shape()) - 1

  # Normalize input features.
  model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

  with tf.compat.v1.variable_scope("tower"):
    result = model.create_model(model_input,
                                num_frames=num_frames,
                                vocab_size=reader.num_classes,
                                labels=labels_batch,
                                is_training=False)
    predictions = result["predictions"]
    tf.compat.v1.summary.histogram("model_activations", predictions)
    if "loss" in result.keys():
      label_loss = result["loss"]
    else:
      label_loss = label_loss_fn.calculate_loss(predictions, labels_batch)

  # Publish tensors via collections; evaluation_loop/evaluate fetch them
  # back by name rather than by Python reference.
  tf.compat.v1.add_to_collection("global_step", global_step)
  tf.compat.v1.add_to_collection("loss", label_loss)
  tf.compat.v1.add_to_collection("predictions", predictions)
  tf.compat.v1.add_to_collection("input_batch", model_input)
  tf.compat.v1.add_to_collection("input_batch_raw", model_input_raw)
  tf.compat.v1.add_to_collection("video_id_batch", video_id_batch)
  tf.compat.v1.add_to_collection("num_frames", num_frames)
  tf.compat.v1.add_to_collection("labels", tf.cast(labels_batch, tf.float32))
  if FLAGS.segment_labels:
    tf.compat.v1.add_to_collection("label_weights",
                                   input_data_dict["label_weights"])
  tf.compat.v1.add_to_collection("summary_op", tf.compat.v1.summary.merge_all())
Run the evaluation loop once. Args: fetches: a dict of tensors to be run within Session. saver: a tensorflow saver to restore the model. summary_writer: a tensorflow summary_writer evl_metrics: an EvaluationMetrics object. last_global_step_val: the global step used in the previous evaluation. Returns: The global_step used in the latest model.
def evaluation_loop(fetches, saver, summary_writer, evl_metrics, last_global_step_val):
  """Run the evaluation loop once.

  Args:
    fetches: a dict of tensors to be run within Session.
    saver: a tensorflow saver to restore the model.
    summary_writer: a tensorflow summary_writer
    evl_metrics: an EvaluationMetrics object.
    last_global_step_val: the global step used in the previous evaluation.

  Returns:
    The global_step used in the latest model.
  """
  global_step_val = -1
  with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
      allow_growth=True))) as sess:
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.train_dir)
    if latest_checkpoint:
      logging.info("Loading checkpoint for eval: %s", latest_checkpoint)
      # Restores from checkpoint
      saver.restore(sess, latest_checkpoint)
      # Assuming model_checkpoint_path looks something like:
      # /my-favorite-path/yt8m_train/model.ckpt-0, extract global_step from it.
      global_step_val = os.path.basename(latest_checkpoint).split("-")[-1]

      # Save model
      if FLAGS.segment_labels:
        inference_model_name = "segment_inference_model"
      else:
        inference_model_name = "inference_model"
      saver.save(
          sess,
          os.path.join(FLAGS.train_dir, "inference_model",
                       inference_model_name))
    else:
      logging.info("No checkpoint file found.")
      return global_step_val

    # Nothing new to evaluate since the previous pass.
    if global_step_val == last_global_step_val:
      logging.info(
          "skip this checkpoint global_step_val=%s "
          "(same as the previous one).", global_step_val)
      return global_step_val

    sess.run([tf.local_variables_initializer()])

    # Start the queue runners.
    coord = tf.train.Coordinator()
    try:
      threads = []
      for qr in tf.compat.v1.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
        threads.extend(
            qr.create_threads(sess, coord=coord, daemon=True, start=True))
      logging.info("enter eval_once loop global_step_val = %s. ",
                   global_step_val)

      evl_metrics.clear()

      examples_processed = 0
      # One pass over the eval data; OutOfRangeError marks exhaustion.
      while not coord.should_stop():
        batch_start_time = time.time()
        output_data_dict = sess.run(fetches)
        seconds_per_batch = time.time() - batch_start_time
        labels_val = output_data_dict["labels"]
        summary_val = output_data_dict["summary"]
        example_per_second = labels_val.shape[0] / seconds_per_batch
        examples_processed += labels_val.shape[0]

        predictions = output_data_dict["predictions"]
        if FLAGS.segment_labels:
          # This is a workaround to ignore the unrated labels.
          predictions *= output_data_dict["label_weights"]
        iteration_info_dict = evl_metrics.accumulate(predictions, labels_val,
                                                     output_data_dict["loss"])
        iteration_info_dict["examples_per_second"] = example_per_second

        iterinfo = utils.AddGlobalStepSummary(
            summary_writer,
            global_step_val,
            iteration_info_dict,
            summary_scope="SegEval" if FLAGS.segment_labels else "Eval")
        logging.info("examples_processed: %d | %s", examples_processed,
                     iterinfo)

    except tf.errors.OutOfRangeError as e:
      logging.info(
          "Done with batched inference. Now calculating global performance "
          "metrics.")
      # calculate the metrics for the entire epoch
      epoch_info_dict = evl_metrics.get()
      epoch_info_dict["epoch_id"] = global_step_val

      summary_writer.add_summary(summary_val, global_step_val)
      epochinfo = utils.AddEpochSummary(
          summary_writer,
          global_step_val,
          epoch_info_dict,
          summary_scope="SegEval" if FLAGS.segment_labels else "Eval")
      logging.info(epochinfo)
      evl_metrics.clear()
    except Exception as e:  # pylint: disable=broad-except
      logging.info("Unexpected exception: %s", str(e))
      coord.request_stop(e)

    coord.request_stop()
    coord.join(threads, stop_grace_period_secs=10)

    logging.info("Total: examples_processed: %d", examples_processed)
    return global_step_val
Starts main evaluation loop.
def evaluate():
  """Starts main evaluation loop.

  Re-reads the model configuration from FLAGS.train_dir/model_flags.json,
  rebuilds the evaluation graph, then repeatedly calls evaluation_loop()
  (once per new checkpoint; once total if --run_once).

  Raises:
    IOError: if model_flags.json is missing or --eval_data_pattern is unset.
  """
  tf.compat.v1.set_random_seed(0)  # for reproducibility

  # Write json of flags
  model_flags_path = os.path.join(FLAGS.train_dir, "model_flags.json")
  if not file_io.file_exists(model_flags_path):
    raise IOError(("Cannot find file %s. Did you run train.py on the same "
                   "--train_dir?") % model_flags_path)
  flags_dict = json.loads(file_io.FileIO(model_flags_path, mode="r").read())

  with tf.Graph().as_default():
    # convert feature_names and feature_sizes to lists of values
    feature_names, feature_sizes = utils.GetListOfFeatureNamesAndSizes(
        flags_dict["feature_names"], flags_dict["feature_sizes"])

    # The reader type must match how the model was trained (frame-level
    # features vs. video-level aggregated features).
    if flags_dict["frame_features"]:
      reader = readers.YT8MFrameFeatureReader(
          feature_names=feature_names,
          feature_sizes=feature_sizes,
          segment_labels=FLAGS.segment_labels)
    else:
      reader = readers.YT8MAggregatedFeatureReader(feature_names=feature_names,
                                                   feature_sizes=feature_sizes)

    # Instantiate the model/loss classes recorded at training time by name.
    model = find_class_by_name(flags_dict["model"],
                               [frame_level_models, video_level_models])()
    label_loss_fn = find_class_by_name(flags_dict["label_loss"], [losses])()

    if not FLAGS.eval_data_pattern:
      raise IOError("'eval_data_pattern' was not specified. Nothing to "
                    "evaluate.")

    build_graph(reader=reader,
                model=model,
                eval_data_pattern=FLAGS.eval_data_pattern,
                label_loss_fn=label_loss_fn,
                num_readers=FLAGS.num_readers,
                batch_size=FLAGS.batch_size)
    logging.info("built evaluation graph")

    # A dict of tensors to be run in Session. build_graph() published these
    # tensors into named graph collections.
    fetches = {
        "video_id": tf.compat.v1.get_collection("video_id_batch")[0],
        "predictions": tf.compat.v1.get_collection("predictions")[0],
        "labels": tf.compat.v1.get_collection("labels")[0],
        "loss": tf.compat.v1.get_collection("loss")[0],
        "summary": tf.compat.v1.get_collection("summary_op")[0]
    }
    if FLAGS.segment_labels:
      fetches["label_weights"] = tf.compat.v1.get_collection("label_weights")[0]

    saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())

    summary_writer = tf.compat.v1.summary.FileWriter(
        os.path.join(FLAGS.train_dir, "eval"),
        graph=tf.compat.v1.get_default_graph())

    evl_metrics = eval_util.EvaluationMetrics(reader.num_classes, FLAGS.top_k,
                                              None)

    last_global_step_val = -1
    while True:
      last_global_step_val = evaluation_loop(fetches, saver, summary_writer,
                                             evl_metrics, last_global_step_val)
      if FLAGS.run_once:
        break
Merges a list of lists into a single list.
def flatten(l): """Merges a list of lists into a single list. """ return [item for sublist in l for item in sublist]
Performs a local (numpy) calculation of the hit at one. Args: predictions: Matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. actuals: Matrix containing the ground truth labels. Dimensions are 'batch' x 'num_classes'. Returns: float: The average hit at one across the entire batch.
def calculate_hit_at_one(predictions, actuals): """Performs a local (numpy) calculation of the hit at one. Args: predictions: Matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. actuals: Matrix containing the ground truth labels. Dimensions are 'batch' x 'num_classes'. Returns: float: The average hit at one across the entire batch. """ top_prediction = numpy.argmax(predictions, 1) hits = actuals[numpy.arange(actuals.shape[0]), top_prediction] return numpy.average(hits)
Performs a local (numpy) calculation of the PERR. Args: predictions: Matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. actuals: Matrix containing the ground truth labels. Dimensions are 'batch' x 'num_classes'. Returns: float: The average precision at equal recall rate across the entire batch.
def calculate_precision_at_equal_recall_rate(predictions, actuals): """Performs a local (numpy) calculation of the PERR. Args: predictions: Matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. actuals: Matrix containing the ground truth labels. Dimensions are 'batch' x 'num_classes'. Returns: float: The average precision at equal recall rate across the entire batch. """ aggregated_precision = 0.0 num_videos = actuals.shape[0] for row in numpy.arange(num_videos): num_labels = int(numpy.sum(actuals[row])) top_indices = numpy.argpartition(predictions[row], -num_labels)[-num_labels:] item_precision = 0.0 for label_index in top_indices: if predictions[row][label_index] > 0: item_precision += actuals[row][label_index] item_precision /= top_indices.size aggregated_precision += item_precision aggregated_precision /= num_videos return aggregated_precision
Performs a local (numpy) calculation of the global average precision. Only the top_k predictions are taken for each of the videos. Args: predictions: Matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. actuals: Matrix containing the ground truth labels. Dimensions are 'batch' x 'num_classes'. top_k: How many predictions to use per video. Returns: float: The global average precision.
def calculate_gap(predictions, actuals, top_k=20): """Performs a local (numpy) calculation of the global average precision. Only the top_k predictions are taken for each of the videos. Args: predictions: Matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. actuals: Matrix containing the ground truth labels. Dimensions are 'batch' x 'num_classes'. top_k: How many predictions to use per video. Returns: float: The global average precision. """ gap_calculator = ap_calculator.AveragePrecisionCalculator() sparse_predictions, sparse_labels, num_positives = top_k_by_class( predictions, actuals, top_k) gap_calculator.accumulate(flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives)) return gap_calculator.peek_ap_at_n()
Extracts the top k predictions for each video, sorted by class. Args: predictions: A numpy matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. k: the top k non-zero entries to preserve in each prediction. Returns: A tuple (predictions,labels, true_positives). 'predictions' and 'labels' are lists of lists of floats. 'true_positives' is a list of scalars. The length of the lists are equal to the number of classes. The entries in the predictions variable are probability predictions, and the corresponding entries in the labels variable are the ground truth for those predictions. The entries in 'true_positives' are the number of true positives for each class in the ground truth. Raises: ValueError: An error occurred when the k is not a positive integer.
def top_k_by_class(predictions, labels, k=20): """Extracts the top k predictions for each video, sorted by class. Args: predictions: A numpy matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. k: the top k non-zero entries to preserve in each prediction. Returns: A tuple (predictions,labels, true_positives). 'predictions' and 'labels' are lists of lists of floats. 'true_positives' is a list of scalars. The length of the lists are equal to the number of classes. The entries in the predictions variable are probability predictions, and the corresponding entries in the labels variable are the ground truth for those predictions. The entries in 'true_positives' are the number of true positives for each class in the ground truth. Raises: ValueError: An error occurred when the k is not a positive integer. """ if k <= 0: raise ValueError("k must be a positive integer.") k = min(k, predictions.shape[1]) num_classes = predictions.shape[1] prediction_triplets = [] for video_index in range(predictions.shape[0]): prediction_triplets.extend( top_k_triplets(predictions[video_index], labels[video_index], k)) out_predictions = [[] for _ in range(num_classes)] out_labels = [[] for _ in range(num_classes)] for triplet in prediction_triplets: out_predictions[triplet[0]].append(triplet[1]) out_labels[triplet[0]].append(triplet[2]) out_true_positives = [numpy.sum(labels[:, i]) for i in range(num_classes)] return out_predictions, out_labels, out_true_positives
Get the top_k for a 1-d numpy array. Returns a sparse list of tuples in (class_index, prediction, label) format
def top_k_triplets(predictions, labels, k=20): """Get the top_k for a 1-d numpy array. Returns a sparse list of tuples in (prediction, class) format """ m = len(predictions) k = min(k, m) indices = numpy.argpartition(predictions, -k)[-k:] return [(index, predictions[index], labels[index]) for index in indices]
Create an information line for the submission file.
def format_lines(video_ids, predictions, top_k, whitelisted_cls_mask=None): """Create an information line the submission file.""" batch_size = len(video_ids) for video_index in range(batch_size): video_prediction = predictions[video_index] if whitelisted_cls_mask is not None: # Whitelist classes. video_prediction *= whitelisted_cls_mask top_indices = np.argpartition(video_prediction, -top_k)[-top_k:] line = [(class_index, predictions[video_index][class_index]) for class_index in top_indices] line = sorted(line, key=lambda p: -p[1]) yield (video_ids[video_index] + "," + " ".join("%i %g" % (label, score) for (label, score) in line) + "\n").encode("utf8")
Creates the section of the graph which reads the input data. Args: reader: A class which parses the input data. data_pattern: A 'glob' style path to the data files. batch_size: How many examples to process at a time. num_readers: How many I/O threads to use. Returns: A tuple containing the features tensor, labels tensor, and optionally a tensor containing the number of frames per video. The exact dimensions depend on the reader being used. Raises: IOError: If no files matching the given pattern were found.
def get_input_data_tensors(reader, data_pattern, batch_size, num_readers=1): """Creates the section of the graph which reads the input data. Args: reader: A class which parses the input data. data_pattern: A 'glob' style path to the data files. batch_size: How many examples to process at a time. num_readers: How many I/O threads to use. Returns: A tuple containing the features tensor, labels tensor, and optionally a tensor containing the number of frames per video. The exact dimensions depend on the reader being used. Raises: IOError: If no files matching the given pattern were found. """ with tf.name_scope("input"): files = gfile.Glob(data_pattern) if not files: raise IOError("Unable to find input files. data_pattern='" + data_pattern + "'") logging.info("number of input files: " + str(len(files))) filename_queue = tf.train.string_input_producer(files, num_epochs=1, shuffle=False) examples_and_labels = [ reader.prepare_reader(filename_queue) for _ in range(num_readers) ] input_data_dict = (tf.train.batch_join(examples_and_labels, batch_size=batch_size, allow_smaller_final_batch=True, enqueue_many=True)) video_id_batch = input_data_dict["video_ids"] video_batch = input_data_dict["video_matrix"] num_frames_batch = input_data_dict["num_frames"] return video_id_batch, video_batch, num_frames_batch
Get segment-level inputs from frame-level features.
def get_segments(batch_video_mtx, batch_num_frames, segment_size): """Get segment-level inputs from frame-level features.""" video_batch_size = batch_video_mtx.shape[0] max_frame = batch_video_mtx.shape[1] feature_dim = batch_video_mtx.shape[-1] padded_segment_sizes = (batch_num_frames + segment_size - 1) // segment_size padded_segment_sizes *= segment_size segment_mask = ( 0 < (padded_segment_sizes[:, np.newaxis] - np.arange(0, max_frame))) # Segment bags. frame_bags = batch_video_mtx.reshape((-1, feature_dim)) segment_frames = frame_bags[segment_mask.reshape(-1)].reshape( (-1, segment_size, feature_dim)) # Segment num frames. segment_start_times = np.arange(0, max_frame, segment_size) num_segments = batch_num_frames[:, np.newaxis] - segment_start_times num_segment_bags = num_segments.reshape((-1)) valid_segment_mask = num_segment_bags > 0 segment_num_frames = num_segment_bags[valid_segment_mask] segment_num_frames[segment_num_frames > segment_size] = segment_size max_segment_num = (max_frame + segment_size - 1) // segment_size video_idxs = np.tile( np.arange(0, video_batch_size)[:, np.newaxis], [1, max_segment_num]) segment_idxs = np.tile(segment_start_times, [video_batch_size, 1]) idx_bags = np.stack([video_idxs, segment_idxs], axis=-1).reshape((-1, 2)) video_segment_ids = idx_bags[valid_segment_mask] return { "video_batch": segment_frames, "num_frames_batch": segment_num_frames, "video_segment_ids": video_segment_ids }
Inference function.
def inference(reader, train_dir, data_pattern, out_file_location, batch_size,
              top_k):
  """Inference function.

  Restores the saved inference model from train_dir, runs all examples
  matching data_pattern through it, and writes top_k predictions per example
  to out_file_location as a CSV submission file. In --segment_labels mode,
  videos are first split into 5-frame segments and the per-segment
  predictions are post-processed into per-class ranked segment lists.
  """
  with tf.Session(config=tf.ConfigProto(
      allow_soft_placement=True)) as sess, gfile.Open(out_file_location,
                                                      "w+") as out_file:
    video_id_batch, video_batch, num_frames_batch = get_input_data_tensors(
        reader, data_pattern, batch_size)
    inference_model_name = "segment_inference_model" if FLAGS.segment_labels else "inference_model"
    checkpoint_file = os.path.join(train_dir, "inference_model",
                                   inference_model_name)
    if not gfile.Exists(checkpoint_file + ".meta"):
      raise IOError("Cannot find %s. Did you run eval.py?" % checkpoint_file)
    meta_graph_location = checkpoint_file + ".meta"
    logging.info("loading meta-graph: " + meta_graph_location)

    # Optionally bundle the model files into a tgz for distribution.
    if FLAGS.output_model_tgz:
      with tarfile.open(FLAGS.output_model_tgz, "w:gz") as tar:
        for model_file in glob.glob(checkpoint_file + ".*"):
          tar.add(model_file, arcname=os.path.basename(model_file))
        tar.add(os.path.join(train_dir, "model_flags.json"),
                arcname="model_flags.json")
      print("Tarred model onto " + FLAGS.output_model_tgz)
    with tf.device("/cpu:0"):
      saver = tf.train.import_meta_graph(meta_graph_location,
                                         clear_devices=True)
    logging.info("restoring variables from " + checkpoint_file)
    saver.restore(sess, checkpoint_file)
    # The saved graph publishes its I/O tensors via named collections.
    input_tensor = tf.get_collection("input_batch_raw")[0]
    num_frames_tensor = tf.get_collection("num_frames")[0]
    predictions_tensor = tf.get_collection("predictions")[0]

    # Workaround for num_epochs issue.
    def set_up_init_ops(variables):
      # Force any "train_input" epoch counters to 1 so the restored input
      # pipeline reads the data exactly once; initialize the rest normally.
      init_op_list = []
      for variable in list(variables):
        if "train_input" in variable.name:
          init_op_list.append(tf.assign(variable, 1))
          variables.remove(variable)
      init_op_list.append(tf.variables_initializer(variables))
      return init_op_list

    sess.run(
        set_up_init_ops(tf.get_collection_ref(tf.GraphKeys.LOCAL_VARIABLES)))

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    num_examples_processed = 0
    start_time = time.time()
    whitelisted_cls_mask = None
    if FLAGS.segment_labels:
      # Raw segment predictions go to a temp file first; the final ranked
      # output is written to the original out_file in the finally block.
      final_out_file = out_file
      out_file = tempfile.NamedTemporaryFile()
      logging.info(
          "Segment temp prediction output will be written to temp file: %s",
          out_file.name)
      if FLAGS.segment_label_ids_file:
        # Build a 0/1 mask over classes from the whitelist file (one integer
        # class id per line; local path or http URL).
        whitelisted_cls_mask = np.zeros((predictions_tensor.get_shape()[-1],),
                                        dtype=np.float32)
        segment_label_ids_file = FLAGS.segment_label_ids_file
        if segment_label_ids_file.startswith("http"):
          logging.info("Retrieving segment ID whitelist files from %s...",
                       segment_label_ids_file)
          segment_label_ids_file, _ = urllib.request.urlretrieve(
              segment_label_ids_file)
        with tf.io.gfile.GFile(segment_label_ids_file) as fobj:
          for line in fobj:
            try:
              cls_id = int(line)
              whitelisted_cls_mask[cls_id] = 1.
            except ValueError:
              # Simply skip the non-integer line.
              continue

    out_file.write(u"VideoId,LabelConfidencePairs\n".encode("utf8"))

    try:
      while not coord.should_stop():
        video_id_batch_val, video_batch_val, num_frames_batch_val = sess.run(
            [video_id_batch, video_batch, num_frames_batch])
        if FLAGS.segment_labels:
          # Expand each video into 5-frame segments; ids become "<video>:<start>".
          results = get_segments(video_batch_val, num_frames_batch_val, 5)
          video_segment_ids = results["video_segment_ids"]
          video_id_batch_val = video_id_batch_val[video_segment_ids[:, 0]]
          video_id_batch_val = np.array([
              "%s:%d" % (x.decode("utf8"), y)
              for x, y in zip(video_id_batch_val, video_segment_ids[:, 1])
          ])
          video_batch_val = results["video_batch"]
          num_frames_batch_val = results["num_frames_batch"]
          if input_tensor.get_shape()[1] != video_batch_val.shape[1]:
            raise ValueError("max_frames mismatch. Please re-run the eval.py "
                             "with correct segment_labels settings.")

        predictions_val, = sess.run([predictions_tensor],
                                    feed_dict={
                                        input_tensor: video_batch_val,
                                        num_frames_tensor: num_frames_batch_val
                                    })
        now = time.time()
        num_examples_processed += len(video_batch_val)
        elapsed_time = now - start_time
        logging.info("num examples processed: " + str(num_examples_processed) +
                     " elapsed seconds: " + "{0:.2f}".format(elapsed_time) +
                     " examples/sec: %.2f" %
                     (num_examples_processed / elapsed_time))
        for line in format_lines(video_id_batch_val, predictions_val, top_k,
                                 whitelisted_cls_mask):
          out_file.write(line)
        out_file.flush()

    except tf.errors.OutOfRangeError:
      # Input queue exhausted: one full pass over the data is complete.
      logging.info("Done with inference. The output file was written to " +
                   out_file.name)
    finally:
      coord.request_stop()

      if FLAGS.segment_labels:
        # Re-read the file and do heap sort.
        # Create multiple heaps.
        logging.info("Post-processing segment predictions...")
        heaps = {}
        out_file.seek(0, 0)
        for line in out_file:
          segment_id, preds = line.decode("utf8").split(",")
          if segment_id == "VideoId":
            # Skip the headline.
            continue
          preds = preds.split(" ")
          pred_cls_ids = [int(preds[idx]) for idx in range(0, len(preds), 2)]
          pred_cls_scores = [
              float(preds[idx]) for idx in range(1, len(preds), 2)
          ]
          for cls, score in zip(pred_cls_ids, pred_cls_scores):
            if not whitelisted_cls_mask[cls]:
              # Skip non-whitelisted classes.
              continue
            if cls not in heaps:
              heaps[cls] = []
            # Keep only the top segment_max_pred (score, segment) pairs per
            # class; a min-heap evicts the lowest score on overflow.
            if len(heaps[cls]) >= FLAGS.segment_max_pred:
              heapq.heappushpop(heaps[cls], (score, segment_id))
            else:
              heapq.heappush(heaps[cls], (score, segment_id))
        logging.info("Writing sorted segment predictions to: %s",
                     final_out_file.name)
        final_out_file.write("Class,Segments\n")
        for cls, cls_heap in heaps.items():
          cls_heap.sort(key=lambda x: x[0], reverse=True)
          final_out_file.write("%d,%s\n" %
                               (cls, " ".join([x[1] for x in cls_heap])))
        final_out_file.close()

      out_file.close()
      coord.join(threads)
      sess.close()
Samples a random sequence of frames of size num_samples. Args: model_input: A tensor of size batch_size x max_frames x feature_size num_frames: A tensor of size batch_size x 1 num_samples: A scalar Returns: `model_input`: A tensor of size batch_size x num_samples x feature_size
def SampleRandomSequence(model_input, num_frames, num_samples): """Samples a random sequence of frames of size num_samples. Args: model_input: A tensor of size batch_size x max_frames x feature_size num_frames: A tensor of size batch_size x 1 num_samples: A scalar Returns: `model_input`: A tensor of size batch_size x num_samples x feature_size """ batch_size = tf.shape(model_input)[0] frame_index_offset = tf.tile(tf.expand_dims(tf.range(num_samples), 0), [batch_size, 1]) max_start_frame_index = tf.maximum(num_frames - num_samples, 0) start_frame_index = tf.cast( tf.multiply(tf.random_uniform([batch_size, 1]), tf.cast(max_start_frame_index + 1, tf.float32)), tf.int32) frame_index = tf.minimum(start_frame_index + frame_index_offset, tf.cast(num_frames - 1, tf.int32)) batch_index = tf.tile(tf.expand_dims(tf.range(batch_size), 1), [1, num_samples]) index = tf.stack([batch_index, frame_index], 2) return tf.gather_nd(model_input, index)
Samples a random set of frames of size num_samples. Args: model_input: A tensor of size batch_size x max_frames x feature_size num_frames: A tensor of size batch_size x 1 num_samples: A scalar Returns: `model_input`: A tensor of size batch_size x num_samples x feature_size
def SampleRandomFrames(model_input, num_frames, num_samples): """Samples a random set of frames of size num_samples. Args: model_input: A tensor of size batch_size x max_frames x feature_size num_frames: A tensor of size batch_size x 1 num_samples: A scalar Returns: `model_input`: A tensor of size batch_size x num_samples x feature_size """ batch_size = tf.shape(model_input)[0] frame_index = tf.cast( tf.multiply(tf.random_uniform([batch_size, num_samples]), tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32) batch_index = tf.tile(tf.expand_dims(tf.range(batch_size), 1), [1, num_samples]) index = tf.stack([batch_index, frame_index], 2) return tf.gather_nd(model_input, index)
Pools over the frames of a video. Args: frames: A tensor with shape [batch_size, num_frames, feature_size]. method: "average", "max", or "none" ("attention" is not implemented). Returns: A tensor with shape [batch_size, feature_size] for average or max pooling. A tensor with shape [batch_size*num_frames, feature_size] for none pooling. Raises: ValueError: if method is other than "average", "max", or "none".
def FramePooling(frames, method, **unused_params): """Pools over the frames of a video. Args: frames: A tensor with shape [batch_size, num_frames, feature_size]. method: "average", "max", "attention", or "none". Returns: A tensor with shape [batch_size, feature_size] for average, max, or attention pooling. A tensor with shape [batch_size*num_frames, feature_size] for none pooling. Raises: ValueError: if method is other than "average", "max", "attention", or "none". """ if method == "average": return tf.reduce_mean(frames, 1) elif method == "max": return tf.reduce_max(frames, 1) elif method == "none": feature_size = frames.shape_as_list()[2] return tf.reshape(frames, [-1, feature_size]) else: raise ValueError("Unrecognized pooling method: %s" % method)
Truncates or pads a tensor to new_size on a given axis. Truncate or extend tensor such that tensor.shape[axis] == new_size. If the size increases, the padding will be performed at the end, using fill_value. Args: tensor: The tensor to be resized. axis: An integer representing the dimension to be sliced. new_size: An integer or 0d tensor representing the new value for tensor.shape[axis]. fill_value: Value to use to fill any new entries in the tensor. Will be cast to the type of tensor. Returns: The resized tensor.
def resize_axis(tensor, axis, new_size, fill_value=0): """Truncates or pads a tensor to new_size on on a given axis. Truncate or extend tensor such that tensor.shape[axis] == new_size. If the size increases, the padding will be performed at the end, using fill_value. Args: tensor: The tensor to be resized. axis: An integer representing the dimension to be sliced. new_size: An integer or 0d tensor representing the new value for tensor.shape[axis]. fill_value: Value to use to fill any new entries in the tensor. Will be cast to the type of tensor. Returns: The resized tensor. """ tensor = tf.convert_to_tensor(tensor) shape = tf.unstack(tf.shape(tensor)) pad_shape = shape[:] pad_shape[axis] = tf.maximum(0, new_size - shape[axis]) shape[axis] = tf.minimum(shape[axis], new_size) shape = tf.stack(shape) resized = tf.concat([ tf.slice(tensor, tf.zeros_like(shape), shape), tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype)) ], axis) # Update shape. new_shape = tensor.get_shape().as_list() # A copy is being made. new_shape[axis] = new_size resized.set_shape(new_shape) return resized
Read labels from TFRecords. Args: data_pattern: the data pattern to the TFRecords. cache_path: the cache path for the label file. Returns: a Labels object.
def read_labels(data_pattern, cache_path=""): """Read labels from TFRecords. Args: data_pattern: the data pattern to the TFRecords. cache_path: the cache path for the label file. Returns: a Labels object. """ if cache_path: if tf.gfile.Exists(cache_path): tf.logging.info("Reading cached labels from %s..." % cache_path) return Labels.from_file(cache_path) tf.enable_eager_execution() data_paths = tf.gfile.Glob(data_pattern) ds = tf.data.TFRecordDataset(data_paths, num_parallel_reads=50) context_features = { "id": tf.FixedLenFeature([], tf.string), "segment_labels": tf.VarLenFeature(tf.int64), "segment_start_times": tf.VarLenFeature(tf.int64), "segment_scores": tf.VarLenFeature(tf.float32) } def _parse_se_func(sequence_example): return tf.parse_single_sequence_example(sequence_example, context_features=context_features) ds = ds.map(_parse_se_func) rated_labels = {} tf.logging.info("Reading labels from TFRecords...") last_batch = 0 batch_size = 5000 for cxt_feature_val, _ in ds: video_id = cxt_feature_val["id"].numpy() segment_labels = cxt_feature_val["segment_labels"].values.numpy() segment_start_times = cxt_feature_val["segment_start_times"].values.numpy() segment_scores = cxt_feature_val["segment_scores"].values.numpy() for label, start_time, score in zip(segment_labels, segment_start_times, segment_scores): rated_labels[("%s:%d" % (video_id, start_time), label)] = score batch_id = len(rated_labels) // batch_size if batch_id != last_batch: tf.logging.info("%d examples processed.", len(rated_labels)) last_batch = batch_id tf.logging.info("Finish reading labels from TFRecords...") labels_obj = Labels(rated_labels) if cache_path: tf.logging.info("Caching labels to %s..." % cache_path) labels_obj.to_file(cache_path) return labels_obj
Read segment predictions. Args: file_path: the submission file path. labels: a Labels object containing the eval labels. top_n: the per-class class capping. Returns: a segment prediction list for each classes.
def read_segment_predictions(file_path, labels, top_n=None): """Read segement predictions. Args: file_path: the submission file path. labels: a Labels object containing the eval labels. top_n: the per-class class capping. Returns: a segment prediction list for each classes. """ cls_preds = {} # A label_id to pred list mapping. with tf.gfile.Open(file_path) as fobj: tf.logging.info("Reading predictions from %s..." % file_path) for line in fobj: label_id, pred_ids_val = line.split(",") pred_ids = pred_ids_val.split(" ") if top_n: pred_ids = pred_ids[:top_n] pred_ids = [ pred_id for pred_id in pred_ids if (pred_id, int(label_id)) in labels.labels ] cls_preds[int(label_id)] = pred_ids if len(cls_preds) % 50 == 0: tf.logging.info("Processed %d classes..." % len(cls_preds)) tf.logging.info("Finish reading predictions.") return cls_preds
Entry function of the script.
def main(unused_argv): """Entry function of the script.""" if not FLAGS.submission_file: raise ValueError("You must input submission file.") eval_labels = read_labels(FLAGS.eval_data_pattern, cache_path=FLAGS.label_cache) tf.logging.info("Total rated segments: %d." % len(eval_labels.labels)) positive_counter = {} for k, v in eval_labels.labels.items(): _, label_id = k if v > 0: positive_counter[label_id] = positive_counter.get(label_id, 0) + 1 seg_preds = read_segment_predictions(FLAGS.submission_file, eval_labels, top_n=FLAGS.top_n) map_cal = map_calculator.MeanAveragePrecisionCalculator(len(seg_preds)) seg_labels = [] seg_scored_preds = [] num_positives = [] for label_id in sorted(seg_preds): class_preds = seg_preds[label_id] seg_label = [eval_labels.labels[(pred, label_id)] for pred in class_preds] seg_labels.append(seg_label) seg_scored_pred = [] if class_preds: seg_scored_pred = [ float(x) / len(class_preds) for x in range(len(class_preds), 0, -1) ] seg_scored_preds.append(seg_scored_pred) num_positives.append(positive_counter[label_id]) map_cal.accumulate(seg_scored_preds, seg_labels, num_positives) map_at_n = np.mean(map_cal.peek_map_at_n()) tf.logging.info("Num classes: %d | mAP@%d: %.6f" % (len(seg_preds), FLAGS.top_n, map_at_n))
Checks that the given string matches a class of the expected type. Args: flag_value: A string naming the class to instantiate. category: A string used further describe the class in error messages (e.g. 'model', 'reader', 'loss'). modules: A list of modules to search for the given class. expected_superclass: A class that the given class should inherit from. Raises: FlagsError: If the given class could not be found or if the first class found with that name doesn't inherit from the expected superclass. Returns: True if a class was found that matches the given constraints.
def validate_class_name(flag_value, category, modules, expected_superclass): """Checks that the given string matches a class of the expected type. Args: flag_value: A string naming the class to instantiate. category: A string used further describe the class in error messages (e.g. 'model', 'reader', 'loss'). modules: A list of modules to search for the given class. expected_superclass: A class that the given class should inherit from. Raises: FlagsError: If the given class could not be found or if the first class found with that name doesn't inherit from the expected superclass. Returns: True if a class was found that matches the given constraints. """ candidates = [getattr(module, flag_value, None) for module in modules] for candidate in candidates: if not candidate: continue if not issubclass(candidate, expected_superclass): raise flags.FlagsError( "%s '%s' doesn't inherit from %s." % (category, flag_value, expected_superclass.__name__)) return True raise flags.FlagsError("Unable to find %s '%s'." % (category, flag_value))
Creates the section of the graph which reads the training data. Args: reader: A class which parses the training data. data_pattern: A 'glob' style path to the data files. batch_size: How many examples to process at a time. num_epochs: How many passes to make over the training data. Set to 'None' to run indefinitely. num_readers: How many I/O threads to use. Returns: A tuple containing the features tensor, labels tensor, and optionally a tensor containing the number of frames per video. The exact dimensions depend on the reader being used. Raises: IOError: If no files matching the given pattern were found.
def get_input_data_tensors(reader, data_pattern, batch_size=1000, num_epochs=None, num_readers=1): """Creates the section of the graph which reads the training data. Args: reader: A class which parses the training data. data_pattern: A 'glob' style path to the data files. batch_size: How many examples to process at a time. num_epochs: How many passes to make over the training data. Set to 'None' to run indefinitely. num_readers: How many I/O threads to use. Returns: A tuple containing the features tensor, labels tensor, and optionally a tensor containing the number of frames per video. The exact dimensions depend on the reader being used. Raises: IOError: If no files matching the given pattern were found. """ logging.info("Using batch size of " + str(batch_size) + " for training.") with tf.name_scope("train_input"): files = gfile.Glob(data_pattern) if not files: raise IOError("Unable to find training files. data_pattern='" + data_pattern + "'.") logging.info("Number of training files: %s.", str(len(files))) filename_queue = tf.train.string_input_producer(files, num_epochs=num_epochs, shuffle=True) training_data = [ reader.prepare_reader(filename_queue) for _ in range(num_readers) ] return tf.train.shuffle_batch_join(training_data, batch_size=batch_size, capacity=batch_size * 5, min_after_dequeue=batch_size, allow_smaller_final_batch=True, enqueue_many=True)
Searches the provided modules for the named class and returns it.
def find_class_by_name(name, modules): """Searches the provided modules for the named class and returns it.""" modules = [getattr(module, name, None) for module in modules] return next(a for a in modules if a)
Creates the Tensorflow graph. This will only be called once in the life of a training model, because after the graph is created the model will be restored from a meta graph file rather than being recreated. Args: reader: The data file reader. It should inherit from BaseReader. model: The core model (e.g. logistic or neural net). It should inherit from BaseModel. train_data_pattern: glob path to the training data files. label_loss_fn: What kind of loss to apply to the model. It should inherit from BaseLoss. batch_size: How many examples to process at a time. base_learning_rate: What learning rate to initialize the optimizer with. optimizer_class: Which optimization algorithm to use. clip_gradient_norm: Magnitude of the gradient to clip to. regularization_penalty: How much weight to give the regularization loss compared to the label loss. num_readers: How many threads to use for I/O operations. num_epochs: How many passes to make over the data. 'None' means an unlimited number of passes.
def build_graph(reader, model, train_data_pattern, label_loss_fn=losses.CrossEntropyLoss(), batch_size=1000, base_learning_rate=0.01, learning_rate_decay_examples=1000000, learning_rate_decay=0.95, optimizer_class=tf.train.AdamOptimizer, clip_gradient_norm=1.0, regularization_penalty=1, num_readers=1, num_epochs=None):
  """Creates the Tensorflow graph.

  This will only be called once in the life of
  a training model, because after the graph is created the model will be
  restored from a meta graph file rather than being recreated.

  Args:
    reader: The data file reader. It should inherit from BaseReader.
    model: The core model (e.g. logistic or neural net). It should inherit
           from BaseModel.
    train_data_pattern: glob path to the training data files.
    label_loss_fn: What kind of loss to apply to the model. It should inherit
                from BaseLoss. NOTE: the default instance is created once at
                definition time and shared by every call using the default.
    batch_size: How many examples to process at a time.
    base_learning_rate: What learning rate to initialize the optimizer with.
    optimizer_class: Which optimization algorithm to use.
    clip_gradient_norm: Magnitude of the gradient to clip to.
    regularization_penalty: How much weight to give the regularization loss
                            compared to the label loss.
    num_readers: How many threads to use for I/O operations.
    num_epochs: How many passes to make over the data. 'None' means an
                unlimited number of passes.
  """
  global_step = tf.Variable(0, trainable=False, name="global_step")

  local_device_protos = device_lib.list_local_devices()
  gpus = [x.name for x in local_device_protos if x.device_type == "GPU"]
  gpus = gpus[:FLAGS.num_gpu]
  num_gpus = len(gpus)

  if num_gpus > 0:
    logging.info("Using the following GPUs to train: " + str(gpus))
    num_towers = num_gpus
    device_string = "/gpu:%d"
  else:
    # FIX: this log message was previously split across a raw newline inside
    # the string literal, which is a syntax error for a single-quoted string.
    logging.info("No GPUs found. Training on CPU.")
    num_towers = 1
    device_string = "/cpu:%d"

  # Decay is driven by the number of examples seen so far, not raw steps.
  learning_rate = tf.train.exponential_decay(
      base_learning_rate,
      global_step * batch_size * num_towers,
      learning_rate_decay_examples,
      learning_rate_decay,
      staircase=True)
  tf.summary.scalar("learning_rate", learning_rate)

  optimizer = optimizer_class(learning_rate)
  input_data_dict = (get_input_data_tensors(reader,
                                            train_data_pattern,
                                            batch_size=batch_size * num_towers,
                                            num_readers=num_readers,
                                            num_epochs=num_epochs))
  model_input_raw = input_data_dict["video_matrix"]
  labels_batch = input_data_dict["labels"]
  num_frames = input_data_dict["num_frames"]
  print("model_input_shape, ", model_input_raw.shape)
  tf.summary.histogram("model/input_raw", model_input_raw)

  feature_dim = len(model_input_raw.get_shape()) - 1

  model_input = tf.nn.l2_normalize(model_input_raw, feature_dim)

  # One equally-sized shard of the batch per tower.
  tower_inputs = tf.split(model_input, num_towers)
  tower_labels = tf.split(labels_batch, num_towers)
  tower_num_frames = tf.split(num_frames, num_towers)
  tower_gradients = []
  tower_predictions = []
  tower_label_losses = []
  tower_reg_losses = []
  for i in range(num_towers):
    # For some reason these 'with' statements can't be combined onto the same
    # line. They have to be nested.
    with tf.device(device_string % i):
      with (tf.variable_scope(("tower"), reuse=True if i > 0 else None)):
        with (slim.arg_scope([slim.model_variable, slim.variable], device="/cpu:0" if num_gpus != 1 else "/gpu:0")):
          result = model.create_model(tower_inputs[i], num_frames=tower_num_frames[i], vocab_size=reader.num_classes, labels=tower_labels[i])
          for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)

          predictions = result["predictions"]
          tower_predictions.append(predictions)

          if "loss" in result.keys():
            label_loss = result["loss"]
          else:
            label_loss = label_loss_fn.calculate_loss(predictions, tower_labels[i])

          if "regularization_loss" in result.keys():
            reg_loss = result["regularization_loss"]
          else:
            reg_loss = tf.constant(0.0)

          reg_losses = tf.losses.get_regularization_losses()
          if reg_losses:
            reg_loss += tf.add_n(reg_losses)

          tower_reg_losses.append(reg_loss)

          # Adds update_ops (e.g., moving average updates in batch normalization) as
          # a dependency to the train_op.
          update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
          if "update_ops" in result.keys():
            update_ops += result["update_ops"]
          if update_ops:
            with tf.control_dependencies(update_ops):
              barrier = tf.no_op(name="gradient_barrier")
              with tf.control_dependencies([barrier]):
                label_loss = tf.identity(label_loss)

          tower_label_losses.append(label_loss)

          # Incorporate the L2 weight penalties etc.
          final_loss = regularization_penalty * reg_loss + label_loss
          gradients = optimizer.compute_gradients(
              final_loss, colocate_gradients_with_ops=False)
          tower_gradients.append(gradients)

  label_loss = tf.reduce_mean(tf.stack(tower_label_losses))
  tf.summary.scalar("label_loss", label_loss)
  if regularization_penalty != 0:
    reg_loss = tf.reduce_mean(tf.stack(tower_reg_losses))
    tf.summary.scalar("reg_loss", reg_loss)
  merged_gradients = utils.combine_gradients(tower_gradients)

  if clip_gradient_norm > 0:
    with tf.name_scope("clip_grads"):
      merged_gradients = utils.clip_gradient_norms(merged_gradients, clip_gradient_norm)

  train_op = optimizer.apply_gradients(merged_gradients, global_step=global_step)

  # Collections consumed later by the training/eval loops.
  tf.add_to_collection("global_step", global_step)
  tf.add_to_collection("loss", label_loss)
  tf.add_to_collection("predictions", tf.concat(tower_predictions, 0))
  tf.add_to_collection("input_batch_raw", model_input_raw)
  tf.add_to_collection("input_batch", model_input)
  tf.add_to_collection("num_frames", num_frames)
  tf.add_to_collection("labels", tf.cast(labels_batch, tf.float32))
  tf.add_to_collection("train_op", train_op)
Creates a Server. Args: cluster: A tf.train.ClusterSpec if the execution is distributed. None otherwise. task: A TaskSpec describing the job type and the task index.
def start_server(cluster, task):
  """Creates a Server.

  Args:
    cluster: A tf.train.ClusterSpec if the execution is distributed. None
      otherwise.
    task: A TaskSpec describing the job type and the task index.
  """
  # Validate the task before touching TensorFlow at all.
  if not task.type:
    raise ValueError("%s: The task type must be specified." %
                     task_as_string(task))
  if task.index is None:
    raise ValueError("%s: The task index must be specified." %
                     task_as_string(task))

  # Create and start a server.
  cluster_spec = tf.train.ClusterSpec(cluster)
  return tf.train.Server(cluster_spec,
                         protocol="grpc",
                         job_name=task.type,
                         task_index=task.index)
Dequantize the feature from the byte format to the float format. Args: feat_vector: the input 1-d vector. max_quantized_value: the maximum of the quantized value. min_quantized_value: the minimum of the quantized value. Returns: A float vector which has the same shape as feat_vector.
def Dequantize(feat_vector, max_quantized_value=2, min_quantized_value=-2):
  """Dequantize the feature from the byte format to the float format.

  Args:
    feat_vector: the input 1-d vector.
    max_quantized_value: the maximum of the quantized value.
    min_quantized_value: the minimum of the quantized value.

  Returns:
    A float vector which has the same shape as feat_vector.
  """
  assert max_quantized_value > min_quantized_value
  value_range = max_quantized_value - min_quantized_value
  # 255 levels; the half-step bias recenters each bucket on its midpoint.
  scale = value_range / 255.0
  shift = (value_range / 512.0) + min_quantized_value
  return feat_vector * scale + shift
Creates a tf.Summary proto with the given name and value.
def MakeSummary(name, value):
  """Creates a tf.Summary proto with the given name and value."""
  summary = tf.Summary()
  entry = summary.value.add()
  entry.tag = str(name)
  entry.simple_value = float(value)
  return summary
Add the global_step summary to the Tensorboard. Args: summary_writer: Tensorflow summary_writer. global_step_val: an int value of the global step. global_step_info_dict: a dictionary of the evaluation metrics calculated for a mini-batch. summary_scope: Train or Eval. Returns: A string of this global_step summary
def AddGlobalStepSummary(summary_writer, global_step_val, global_step_info_dict, summary_scope="Eval"):
  """Add the global_step summary to the Tensorboard.

  Args:
    summary_writer: Tensorflow summary_writer.
    global_step_val: an int value of the global step.
    global_step_info_dict: a dictionary of the evaluation metrics calculated
      for a mini-batch.
    summary_scope: Train or Eval.

  Returns:
    A string of this global_step summary.
  """
  hit_at_one = global_step_info_dict["hit_at_one"]
  perr = global_step_info_dict["perr"]
  loss = global_step_info_dict["loss"]
  examples_per_second = global_step_info_dict.get("examples_per_second", -1)

  # -1 marks "not measured"; that metric is simply not written out.
  tagged_values = [
      ("GlobalStep/" + summary_scope + "_Hit@1", hit_at_one),
      ("GlobalStep/" + summary_scope + "_Perr", perr),
      ("GlobalStep/" + summary_scope + "_Loss", loss),
  ]
  if examples_per_second != -1:
    tagged_values.append(
        ("GlobalStep/" + summary_scope + "_Example_Second", examples_per_second))

  for tag, value in tagged_values:
    summary_writer.add_summary(MakeSummary(tag, value), global_step_val)
  summary_writer.flush()

  info = (
      "global_step {0} | Batch Hit@1: {1:.3f} | Batch PERR: {2:.3f} | Batch "
      "Loss: {3:.3f} | Examples_per_sec: {4:.3f}").format(
          global_step_val, hit_at_one, perr, loss, examples_per_second)
  return info
Add the epoch summary to the Tensorboard. Args: summary_writer: Tensorflow summary_writer. global_step_val: an int value of the global step. epoch_info_dict: a dictionary of the evaluation metrics calculated for the whole epoch. summary_scope: Train or Eval. Returns: A string of this global_step summary
def AddEpochSummary(summary_writer, global_step_val, epoch_info_dict, summary_scope="Eval"):
  """Add the epoch summary to the Tensorboard.

  Args:
    summary_writer: Tensorflow summary_writer.
    global_step_val: an int value of the global step.
    epoch_info_dict: a dictionary of the evaluation metrics calculated for the
      whole epoch.
    summary_scope: Train or Eval.

  Returns:
    A string of this global_step summary.
  """
  epoch_id = epoch_info_dict["epoch_id"]
  avg_hit_at_one = epoch_info_dict["avg_hit_at_one"]
  avg_perr = epoch_info_dict["avg_perr"]
  avg_loss = epoch_info_dict["avg_loss"]
  aps = epoch_info_dict["aps"]
  gap = epoch_info_dict["gap"]
  mean_ap = numpy.mean(aps)

  # Emit one scalar per epoch-level metric, all at the same global step.
  for tag_suffix, value in [
      ("_Avg_Hit@1", avg_hit_at_one),
      ("_Avg_Perr", avg_perr),
      ("_Avg_Loss", avg_loss),
      ("_MAP", mean_ap),
      ("_GAP", gap),
  ]:
    summary_writer.add_summary(
        MakeSummary("Epoch/" + summary_scope + tag_suffix, value),
        global_step_val)
  summary_writer.flush()

  info = ("epoch/eval number {0} | Avg_Hit@1: {1:.3f} | Avg_PERR: {2:.3f} "
          "| MAP: {3:.3f} | GAP: {4:.3f} | Avg_Loss: {5:3f} | num_classes: {6}"
         ).format(epoch_id, avg_hit_at_one, avg_perr, mean_ap, gap, avg_loss,
                  len(aps))
  return info
Extract the list of feature names and the dimensionality of each feature from string of comma separated values. Args: feature_names: string containing comma separated list of feature names feature_sizes: string containing comma separated list of feature sizes Returns: List of the feature names and list of the dimensionality of each feature. Elements in the first/second list are strings/integers.
def GetListOfFeatureNamesAndSizes(feature_names, feature_sizes):
  """Extract the list of feature names and the dimensionality of each feature
     from string of comma separated values.

  Args:
    feature_names: string containing comma separated list of feature names
    feature_sizes: string containing comma separated list of feature sizes

  Returns:
    List of the feature names and list of the dimensionality of each feature.
    Elements in the first/second list are strings/integers.
  """
  # FIX: the original comprehensions reused the parameter names as loop
  # variables (`for feature_names in feature_names.split(",")`), shadowing
  # the arguments; distinct names make the intent clear.
  list_of_feature_names = [name.strip() for name in feature_names.split(",")]
  list_of_feature_sizes = [int(size) for size in feature_sizes.split(",")]
  if len(list_of_feature_names) != len(list_of_feature_sizes):
    # Deliberately log-and-continue (matching the original behaviour) so the
    # caller still receives both lists.
    logging.error("length of the feature names (=" +
                  str(len(list_of_feature_names)) + ") != length of feature "
                  "sizes (=" + str(len(list_of_feature_sizes)) + ")")

  return list_of_feature_names, list_of_feature_sizes
Clips the gradients by the given value. Args: gradients_to_variables: A list of gradient to variable pairs (tuples). max_norm: the maximum norm value. Returns: A list of clipped gradient to variable pairs.
def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_pairs = []
  for gradient, variable in gradients_to_variables:
    if gradient is not None:
      # Sparse gradients need their values clipped while keeping the indices.
      if isinstance(gradient, tf.IndexedSlices):
        clipped_values = tf.clip_by_norm(gradient.values, max_norm)
        gradient = tf.IndexedSlices(clipped_values, gradient.indices,
                                    gradient.dense_shape)
      else:
        gradient = tf.clip_by_norm(gradient, max_norm)
    clipped_pairs.append((gradient, variable))
  return clipped_pairs
Calculate the combined gradient for each shared variable across all towers. Note that this function provides a synchronization point across all towers. Args: tower_grads: List of lists of (gradient, variable) tuples. The outer list is over individual gradients. The inner list is over the gradient calculation for each tower. Returns: List of pairs of (gradient, variable) where the gradient has been summed across all towers.
def combine_gradients(tower_grads):
  """Calculate the combined gradient for each shared variable across all towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    tower_grads: List of lists of (gradient, variable) tuples. The outer list
      is over individual gradients. The inner list is over the gradient
      calculation for each tower.

  Returns:
    List of pairs of (gradient, variable) where the gradient has been summed
      across all towers.
  """
  # Drop entries whose gradient is None (variables untouched by a tower).
  filtered_grads = [
      [x for x in grad_list if x[0] is not None] for grad_list in tower_grads
  ]
  final_grads = []
  # FIX: the original used `xrange`, a Python-2-only builtin that raises
  # NameError on Python 3; `range` behaves identically here.
  for i in range(len(filtered_grads[0])):
    grads = [filtered_grads[t][i] for t in range(len(filtered_grads))]
    grad = tf.stack([x[0] for x in grads], 0)
    grad = tf.reduce_sum(grad, 0)
    # The variable object is shared, so take it from the first tower.
    final_grads.append((
        grad,
        filtered_grads[0][i][1],
    ))

  return final_grads
Uses OpenCV to iterate over all frames of filename at a given frequency. Args: filename: Path to video file (e.g. mp4) every_ms: The duration (in milliseconds) to skip between frames. max_num_frames: Maximum number of frames to process, taken from the beginning of the video. Yields: RGB frame with shape (image height, image width, channels)
def frame_iterator(filename, every_ms=1000, max_num_frames=300):
  """Uses OpenCV to iterate over all frames of filename at a given frequency.

  Args:
    filename: Path to video file (e.g. mp4)
    every_ms: The duration (in milliseconds) to skip between frames.
    max_num_frames: Maximum number of frames to process, taken from the
      beginning of the video.

  Yields:
    RGB frame with shape (image height, image width, channels)
  """
  video_capture = cv2.VideoCapture()
  if not video_capture.open(filename):
    # NOTE: Python 2 print-to-stderr syntax; this function is py2-only as-is.
    print >> sys.stderr, 'Error: Cannot open video file ' + filename
    return
  last_ts = -99999  # The timestamp of last retrieved frame.
  num_retrieved = 0

  while num_retrieved < max_num_frames:
    # Skip frames until at least every_ms has elapsed since the last yield.
    # CAP_PROP_POS_MSEC is presumably imported from cv2 elsewhere in the
    # file — TODO confirm.
    while video_capture.get(CAP_PROP_POS_MSEC) < every_ms + last_ts:
      if not video_capture.read()[0]:
        # End of stream reached while skipping.
        return

    last_ts = video_capture.get(CAP_PROP_POS_MSEC)
    has_frames, frame = video_capture.read()
    if not has_frames:
      break
    yield frame
    num_retrieved += 1
Quantizes float32 `features` into string.
def quantize(features, min_quantized_value=-2.0, max_quantized_value=2.0):
  """Quantizes float32 `features` into string."""
  assert features.dtype == 'float32'
  assert len(features.shape) == 1  # 1-D array
  # Clamp into the representable range, then map linearly onto 0..255.
  clipped = numpy.clip(features, min_quantized_value, max_quantized_value)
  value_range = max_quantized_value - min_quantized_value
  scaled = (clipped - min_quantized_value) * (255.0 / value_range)
  quantized = [int(round(level)) for level in scaled]
  return _make_bytes(quantized)
Calculates element-wise percent difference between two numpy matrices.
def _MeanElementWiseDifference(a, b): """Calculates element-wise percent difference between two numpy matrices.""" difference = numpy.abs(a - b) denominator = numpy.maximum(numpy.abs(a), numpy.abs(b)) # We dont care if one is 0 and another is 0.01 return (difference / (0.01 + denominator)).mean()
Get the version without importing the package
def read_version(fname='youtube_dl/version.py'):
    """Get the version without importing the package"""
    # Execute version.py in this frame and pull __version__ out of the
    # resulting locals; relies on CPython's exec/locals() interaction, so
    # do not "simplify" this into a direct variable reference.
    exec(compile(read_file(fname), fname, 'exec'))
    return locals()['__version__']
Remove a file if it exists
def try_rm(filename):
    """ Remove a file if it exists """
    try:
        os.remove(filename)
    except OSError as err:
        # A missing file is fine; re-raise anything else.
        if err.errno != errno.ENOENT:
            raise
Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored
def report_warning(message):
    '''
    Print the message to stderr, it will be prefixed with 'WARNING:'
    If stderr is a tty file the 'WARNING:' will be colored
    '''
    # Only colorize on a real terminal, and never on Windows consoles.
    use_color = sys.stderr.isatty() and compat_os_name != 'nt'
    _msg_header = '\033[0;33mWARNING:\033[0m' if use_color else 'WARNING:'
    output = '%s %s\n' % (_msg_header, message)
    # Binary-mode stderr (or Python 2) needs encoded bytes.
    if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
        output = output.encode(preferredencoding())
    sys.stderr.write(output)
Returns true if the file has been downloaded
def _download_restricted(url, filename, age):
    """ Returns true if the file has been downloaded """

    params = {
        'age_limit': age,
        'skip_download': True,
        'writeinfojson': True,
        'outtmpl': '%(id)s.%(ext)s',
    }
    ydl = YoutubeDL(params)
    ydl.add_default_info_extractors()
    info_json = os.path.splitext(filename)[0] + '.info.json'
    # Make sure no stale metadata from a previous run skews the result.
    try_rm(info_json)
    try:
        ydl.download([url])
    except DownloadError:
        try_rm(info_json)
    # The age gate passed iff the info JSON was actually written.
    downloaded = os.path.exists(info_json)
    try_rm(info_json)
    return downloaded
PKCS#7 padding @param {int[]} data cleartext @returns {int[]} padding data
def pkcs7_padding(data):
    """
    PKCS#7 padding

    @param {int[]} data        cleartext
    @returns {int[]}           padding data
    """
    # Pad up to the next block boundary; a full extra block when already
    # aligned, as PKCS#7 requires.
    pad_len = BLOCK_SIZE_BYTES - len(data) % BLOCK_SIZE_BYTES
    return data + [pad_len] * pad_len
Decrypt with aes in counter mode @param {int[]} data cipher @param {int[]} key 16/24/32-Byte cipher key @param {instance} counter Instance whose next_value function (@returns {int[]} 16-Byte block) returns the next counter block @returns {int[]} decrypted data
def aes_ctr_decrypt(data, key, counter):
    """
    Decrypt with aes in counter mode

    @param {int[]} data        cipher
    @param {int[]} key         16/24/32-Byte cipher key
    @param {instance} counter  Instance whose next_value function (@returns {int[]}  16-Byte block)
                               returns the next counter block
    @returns {int[]}           decrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    decrypted_data = []
    for block_index in range(block_count):
        counter_block = counter.next_value()
        block = data[block_index * BLOCK_SIZE_BYTES: (block_index + 1) * BLOCK_SIZE_BYTES]
        block += [0] * (BLOCK_SIZE_BYTES - len(block))

        # CTR mode: encrypt the counter, XOR the keystream with the cipher.
        keystream_block = aes_encrypt(counter_block, expanded_key)
        decrypted_data += xor(block, keystream_block)

    # Drop the zero padding added to the final partial block.
    return decrypted_data[:len(data)]
Decrypt with aes in CBC mode @param {int[]} data cipher @param {int[]} key 16/24/32-Byte cipher key @param {int[]} iv 16-Byte IV @returns {int[]} decrypted data
def aes_cbc_decrypt(data, key, iv):
    """
    Decrypt with aes in CBC mode

    @param {int[]} data        cipher
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          16-Byte IV
    @returns {int[]}           decrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    decrypted_data = []
    prev_cipher_block = iv
    for block_index in range(block_count):
        cipher_block = data[block_index * BLOCK_SIZE_BYTES: (block_index + 1) * BLOCK_SIZE_BYTES]
        cipher_block += [0] * (BLOCK_SIZE_BYTES - len(cipher_block))

        # CBC: decrypt, then XOR with the previous cipher block (IV first).
        decrypted_block = aes_decrypt(cipher_block, expanded_key)
        decrypted_data += xor(decrypted_block, prev_cipher_block)
        prev_cipher_block = cipher_block

    # Drop the zero padding added to the final partial block.
    return decrypted_data[:len(data)]
Encrypt with aes in CBC mode. Using PKCS#7 padding @param {int[]} data cleartext @param {int[]} key 16/24/32-Byte cipher key @param {int[]} iv 16-Byte IV @returns {int[]} encrypted data
def aes_cbc_encrypt(data, key, iv):
    """
    Encrypt with aes in CBC mode. Using PKCS#7 padding

    @param {int[]} data        cleartext
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          16-Byte IV
    @returns {int[]}           encrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    encrypted_data = []
    prev_cipher_block = iv
    for block_index in range(block_count):
        plain_block = data[block_index * BLOCK_SIZE_BYTES: (block_index + 1) * BLOCK_SIZE_BYTES]
        plain_block = pkcs7_padding(plain_block)

        # CBC: XOR with the previous cipher block (IV first), then encrypt.
        mixed_block = xor(plain_block, prev_cipher_block)
        cipher_block = aes_encrypt(mixed_block, expanded_key)
        encrypted_data += cipher_block
        prev_cipher_block = cipher_block

    return encrypted_data
Encrypt with aes in ECB mode. Using PKCS#7 padding @param {int[]} data cleartext @param {int[]} key 16/24/32-Byte cipher key @returns {int[]} encrypted data
def aes_ecb_encrypt(data, key):
    """
    Encrypt with aes in ECB mode. Using PKCS#7 padding

    @param {int[]} data        cleartext
    @param {int[]} key         16/24/32-Byte cipher key
    @returns {int[]}           encrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    encrypted_data = []
    for block_index in range(block_count):
        # ECB: each padded block is encrypted independently.
        plain_block = data[block_index * BLOCK_SIZE_BYTES: (block_index + 1) * BLOCK_SIZE_BYTES]
        encrypted_data += aes_encrypt(pkcs7_padding(plain_block), expanded_key)

    return encrypted_data
Generate key schedule @param {int[]} data 16/24/32-Byte cipher key @returns {int[]} 176/208/240-Byte expanded key
def key_expansion(data):
    """
    Generate key schedule

    @param {int[]} data  16/24/32-Byte cipher key
    @returns {int[]}     176/208/240-Byte expanded key
    """
    data = data[:]  # copy
    rcon_iteration = 1
    key_size_bytes = len(data)
    # Number of round keys depends on key size: 11/13/15 blocks of 16 bytes.
    expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES

    while len(data) < expanded_key_size_bytes:
        # First word of each key-length group goes through the schedule core
        # (rotate + S-box + round constant).
        temp = data[-4:]
        temp = key_schedule_core(temp, rcon_iteration)
        rcon_iteration += 1
        # Each new word is XORed with the word one key-length back.
        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

        for _ in range(3):
            temp = data[-4:]
            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

        # AES-256 applies an extra S-box substitution mid-group.
        if key_size_bytes == 32:
            temp = data[-4:]
            temp = sub_bytes(temp)
            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

        # Remaining plain words per group: 3 for AES-256, 2 for AES-192,
        # none for AES-128.
        for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
            temp = data[-4:]
            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
    # The loop may overshoot; trim to the exact schedule length.
    data = data[:expanded_key_size_bytes]

    return data
Encrypt one block with aes @param {int[]} data 16-Byte state @param {int[]} expanded_key 176/208/240-Byte expanded key @returns {int[]} 16-Byte cipher
def aes_encrypt(data, expanded_key):
    """
    Encrypt one block with aes

    @param {int[]} data          16-Byte state
    @param {int[]} expanded_key  176/208/240-Byte expanded key
    @returns {int[]}             16-Byte cipher
    """
    # 10/12/14 rounds for 176/208/240-byte schedules (AES-128/192/256).
    rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

    # Initial round-key addition.
    data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
    for i in range(1, rounds + 1):
        data = sub_bytes(data)
        data = shift_rows(data)
        if i != rounds:
            # The final round omits the column-mixing step.
            data = mix_columns(data)
        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])

    return data
Decrypt one block with aes @param {int[]} data 16-Byte cipher @param {int[]} expanded_key 176/208/240-Byte expanded key @returns {int[]} 16-Byte state
def aes_decrypt(data, expanded_key):
    """
    Decrypt one block with aes

    @param {int[]} data          16-Byte cipher
    @param {int[]} expanded_key  176/208/240-Byte expanded key
    @returns {int[]}             16-Byte state
    """
    # 10/12/14 rounds for 176/208/240-byte schedules (AES-128/192/256).
    rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

    # Apply the inverse transformations with round keys in reverse order.
    for i in range(rounds, 0, -1):
        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
        if i != rounds:
            # Mirrors encryption, where the last round had no MixColumns.
            data = mix_columns_inv(data)
        data = shift_rows_inv(data)
        data = sub_bytes_inv(data)
    # Undo the initial round-key addition.
    data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])

    return data
Decrypt text - The first 8 Bytes of decoded 'data' are the 8 high Bytes of the counter - The cipher key is retrieved by encrypting the first 16 Byte of 'password' with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's) - Mode of operation is 'counter' @param {str} data Base64 encoded string @param {str,unicode} password Password (will be encoded with utf-8) @param {int} key_size_bytes Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit @returns {str} Decrypted data
def aes_decrypt_text(data, password, key_size_bytes):
    """
    Decrypt text
    - The first 8 Bytes of decoded 'data' are the 8 high Bytes of the counter
    - The cipher key is retrieved by encrypting the first 16 Byte of 'password'
      with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
    - Mode of operation is 'counter'

    @param {str} data                    Base64 encoded string
    @param {str,unicode} password        Password (will be encoded with utf-8)
    @param {int} key_size_bytes          Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
    @returns {str}                       Decrypted data
    """
    NONCE_LENGTH_BYTES = 8

    data = bytes_to_intlist(compat_b64decode(data))
    password = bytes_to_intlist(password.encode('utf-8'))

    # Zero-pad the password to the key size, then derive the actual key by
    # encrypting its first block with itself (repeated to the key length).
    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
    key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)

    nonce = data[:NONCE_LENGTH_BYTES]
    cipher = data[NONCE_LENGTH_BYTES:]

    class Counter(object):
        # Counter block: 8-byte nonce followed by 8 zero bytes, incremented
        # (via inc) after every block.
        __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)

        def next_value(self):
            temp = self.__value
            self.__value = inc(self.__value)
            return temp

    decrypted_data = aes_ctr_decrypt(cipher, key, Counter())
    plaintext = intlist_to_bytes(decrypted_data)

    return plaintext
Simulate JS's ternary operator (cndn?if_true:if_false)
def _js_ternary(cndn, if_true=True, if_false=False):
    """Simulate JS's ternary operator (cndn?if_true:if_false)"""
    # JS falsy values: false, null, 0, '', undefined, NaN.
    falsy = cndn in (False, None, 0, '', JS_Undefined, _NaN)
    return if_false if falsy else if_true
Update the program file with the latest version from the repository
def update_self(to_screen, verbose, opener):
    """Update the program file with the latest version from the repository"""

    UPDATE_URL = 'https://yt-dl.org/update/'
    VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
    JSON_URL = UPDATE_URL + 'versions.json'
    UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)

    # Self-update only makes sense for the zipimport or frozen (py2exe)
    # distributions; package-manager installs must update themselves.
    if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, 'frozen'):
        to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
        return

    # Check if there is a new version
    try:
        newversion = opener.open(VERSION_URL).read().decode('utf-8').strip()
    except Exception:
        if verbose:
            to_screen(encode_compat_str(traceback.format_exc()))
        to_screen('ERROR: can\'t find the current version. Please try again later.')
        return
    if newversion == __version__:
        to_screen('youtube-dl is up-to-date (' + __version__ + ')')
        return

    # Download and check versions info
    try:
        versions_info = opener.open(JSON_URL).read().decode('utf-8')
        versions_info = json.loads(versions_info)
    except Exception:
        if verbose:
            to_screen(encode_compat_str(traceback.format_exc()))
        to_screen('ERROR: can\'t obtain versions info. Please try again later.')
        return
    if 'signature' not in versions_info:
        to_screen('ERROR: the versions file is not signed or corrupted. Aborting.')
        return
    signature = versions_info['signature']
    del versions_info['signature']
    # FIX: this message was previously split across a raw newline inside a
    # single-quoted string literal, which is a syntax error.
    if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
        to_screen('ERROR: the versions file signature is invalid. Aborting.')
        return

    version_id = versions_info['latest']

    def version_tuple(version_str):
        return tuple(map(int, version_str.split('.')))
    if version_tuple(__version__) >= version_tuple(version_id):
        to_screen('youtube-dl is up to date (%s)' % __version__)
        return

    to_screen('Updating to version ' + version_id + ' ...')
    version = versions_info['versions'][version_id]

    print_notes(to_screen, versions_info['versions'])

    # sys.executable is set to the full pathname of the exe-file for py2exe
    # though symlinks are not followed so that we need to do this manually
    # with help of realpath
    filename = compat_realpath(sys.executable if hasattr(sys, 'frozen') else sys.argv[0])

    if not os.access(filename, os.W_OK):
        to_screen('ERROR: no write permissions on %s' % filename)
        return

    # Py2EXE
    if hasattr(sys, 'frozen'):
        exe = filename
        directory = os.path.dirname(exe)
        if not os.access(directory, os.W_OK):
            to_screen('ERROR: no write permissions on %s' % directory)
            return

        try:
            urlh = opener.open(version['exe'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to download latest version')
            return

        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['exe'][1]:
            to_screen('ERROR: the downloaded file hash does not match. Aborting.')
            return

        try:
            with open(exe + '.new', 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to write the new version')
            return

        try:
            # A running exe cannot overwrite itself on Windows; a helper
            # batch file swaps the files after this process exits.
            bat = os.path.join(directory, 'youtube-dl-updater.bat')
            with open(bat, 'w') as batfile:
                batfile.write('''
@echo off
echo Waiting for file handle to be closed ...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s" > NUL
echo Updated youtube-dl to version %s.
start /b "" cmd /c del "%%~f0"&exit /b" \n''' % (exe, exe, version_id))

            subprocess.Popen([bat])  # Continues to run in the background
            return  # Do not show premature success messages
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to overwrite current version')
            return

    # Zip unix package
    elif isinstance(globals().get('__loader__'), zipimporter):
        try:
            urlh = opener.open(version['bin'][0])
            newcontent = urlh.read()
            urlh.close()
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to download latest version')
            return

        newcontent_hash = hashlib.sha256(newcontent).hexdigest()
        if newcontent_hash != version['bin'][1]:
            to_screen('ERROR: the downloaded file hash does not match. Aborting.')
            return

        try:
            with open(filename, 'wb') as outf:
                outf.write(newcontent)
        except (IOError, OSError):
            if verbose:
                to_screen(encode_compat_str(traceback.format_exc()))
            to_screen('ERROR: unable to overwrite current version')
            return

    to_screen('Updated youtube-dl. Restart youtube-dl to use the new version.')
Get preferred encoding. Returns the best encoding scheme for the system, based on locale.getpreferredencoding() and some further tweaks.
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    pref = 'UTF-8'
    try:
        candidate = locale.getpreferredencoding()
        # Only trust the locale's answer if Python can actually encode
        # with it; otherwise keep the UTF-8 fallback.
        'TEST'.encode(candidate)
        pref = candidate
    except Exception:
        pass
    return pref
Encode obj as JSON and write it to fn, atomically if possible
def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """

    fn = encodeFilename(fn)
    # On Python 2 (non-Windows) path helpers return bytes, but
    # NamedTemporaryFile needs text names; decode via the FS encoding.
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non-ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(f).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(f).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname

    # Create the temp file next to the target so the final os.rename stays
    # on one filesystem (cross-device renames are not atomic).
    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }

    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })

    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))

    try:
        with tf:
            json.dump(obj, tf)
        with compat_contextlib_suppress(OSError):
            if sys.platform == 'win32':
                # Need to remove existing file on Windows, else os.rename raises
                # WindowsError or FileExistsError.
                os.unlink(fn)
            # Widen the temp file's restrictive default mode (0600) to the
            # process's normal creation mode before moving it into place;
            # the umask round-trip is how the current mask is read.
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        os.rename(tf.name, fn)
    except Exception:
        # Best-effort cleanup of the temp file before re-raising.
        with compat_contextlib_suppress(OSError):
            os.remove(tf.name)
        raise
Return the content of the tag with the specified ID in the passed HTML document
def get_element_by_id(id, html):
    """Return the inner content of the tag whose id attribute equals *id*."""
    # An id lookup is just the generic attribute lookup specialised to 'id'.
    return get_element_by_attribute('id', id, html)
Return the content of the first tag with the specified class in the passed HTML document
def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the
    passed HTML document, or None when no such tag exists."""
    matches = get_elements_by_class(class_name, html)
    if not matches:
        return None
    return matches[0]
Return the content of all tags with the specified class in the passed HTML document as a list
def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed
    HTML document, as a list."""
    # Match the class as a whole word anywhere inside the quoted class
    # attribute value; the pattern is pre-built here, so escaping is disabled
    # in the generic lookup.
    class_pattern = r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name)
    return get_elements_by_attribute(
        'class', class_pattern, html, escape_value=False)
Return the content of all tags with the specified attribute/value pair in the passed HTML document, as a list
def get_elements_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of all tags carrying the specified attribute/value
    pair in the passed HTML document, as a list."""
    if escape_value:
        value = re.escape(value)

    pattern = r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), value)

    contents = []
    for match in re.finditer(pattern, html):
        content = match.group('content')
        # If the captured content begins with a quote, drop one symmetric
        # leading/trailing quote pair.
        if content[:1] in ('"', "'"):
            content = content[1:-1]
        contents.append(unescapeHTML(content))
    return contents
Given a string for an HTML element such as <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'" > Decode and return a dictionary of attributes. { 'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz', 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': ''' }. NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
def extract_attributes(html_element):
    """Parse the attributes of a single HTML start tag into a dict.

    Given a string such as
    <el a="foo" B="bar" c="&98;az" d=boz empty= noval entity="&amp;" sq='"' dq="'">
    this returns { 'a': 'foo', 'b': 'bar', 'c': 'baz', 'd': 'boz',
    'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }
    -- attribute names are lower-cased and entity references decoded.

    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    attrs = None
    # Older Python may throw HTMLParseError in case of malformed HTML (even
    # on .close()!); suppressing it leaves attrs as None -> empty result.
    with compat_contextlib_suppress(compat_HTMLParseError), \
            contextlib.closing(HTMLAttributeParser()) as parser:
        parser.feed(html_element)
        attrs = parser.attrs
    return attrs or {}
Clean an HTML snippet into a readable string
def clean_html(html):
    """Clean an HTML snippet into a readable string"""
    if html is None:
        # Convenience for sanitizing descriptions etc.
        return html

    # Literal newlines carry no meaning in HTML; flatten them first so only
    # <br> and paragraph boundaries produce line breaks.
    text = html.replace('\n', ' ')
    for pattern, replacement in (
        (r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n'),   # <br> variants -> newline
        (r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n'),  # paragraph breaks
        ('<.*?>', ''),                          # strip remaining tags
    ):
        text = re.sub(pattern, replacement, text)
    # Finally decode HTML entities and trim surrounding whitespace.
    return unescapeHTML(text).strip()