Try to open the given filename, and slightly tweak it if this fails.

Attempts to open the given filename. If this fails, it tries to change the filename slightly, step by step, until it's either able to open it or it fails and raises a final exception, like the standard open() function. It returns the tuple (stream, definitive_file_name).
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to
    change the filename slightly, step by step, until it's either
    able to open it or it fails and raises a final exception, like
    the standard open() function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)
Convert RFC 2822 defined time string into system timestamp
def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    timestamp = None
    timetuple = email.utils.parsedate_tz(timestr)
    if timetuple is not None:
        timestamp = email.utils.mktime_tz(timetuple)
    return timestamp
Sanitizes a string so that it can be used as part of a filename. If restricted is set, use a stricter subset of allowed characters. Set is_id if this is not an arbitrary string, but an ID that should be kept if possible.
def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so that it can be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept
    if possible.
    """
    def replace_insane(char):
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        elif char == '"':
            return '' if restricted else '\''
        elif char == ':':
            return '_-' if restricted else ' -'
        elif char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '' if unicodedata.category(char)[0] in 'CM' else '_'
        return char

    # Replace look-alike Unicode glyphs
    if restricted and not is_id:
        s = unicodedata.normalize('NFKC', s)
    # Handle timestamps
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(map(replace_insane, s))
    if not is_id:
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
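For a concrete sense of restricted mode (illustrative input; the exact handling of accented characters depends on the module's ACCENT_CHARS table):

sanitize_filename('AT&T: 12:34 "demo"', restricted=True)
# -> 'AT_T_-_12_34_demo': the timestamp keeps its digits with ':' mapped
#    to '_', and punctuation outside the allowed subset collapses to '_'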
Sanitizes and normalizes path on Windows
def sanitize_path(s):
    """Sanitizes and normalizes path on Windows"""
    if sys.platform != 'win32':
        return s
    drive_or_unc, _ = os.path.splitdrive(s)
    if sys.version_info < (2, 7) and not drive_or_unc:
        drive_or_unc, _ = os.path.splitunc(s)
    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    return os.path.join(*sanitized_path)
Expand shell variables and ~
def expand_path(s):
    """Expand shell variables and ~"""
    return os.path.expandvars(compat_expanduser(s))
Remove all duplicates from the input iterable
def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    res = []
    for el in iterable:
        if el not in res:
            res.append(el)
    return res
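Order of first appearance is preserved while later duplicates are dropped:

orderedSet([1, 2, 1, 3, 2])  # -> [1, 2, 3]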
Transforms an HTML entity to a character.
def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        # Also, weirdly, compat_contextlib_suppress fails here in 2.6
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity
Return a UNIX timestamp from the given date
def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """

    if date_str is None:
        return None

    date_str = re.sub(r'\.[0-9]+', '', date_str)

    if timezone is None:
        timezone, date_str = extract_timezone(date_str)

    with compat_contextlib_suppress(ValueError):
        date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        dt = datetime.datetime.strptime(date_str, date_format) - timezone
        return calendar.timegm(dt.timetuple())
Return a string with the date in the format YYYYMMDD
def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""

    if date_str is None:
        return None
    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)

    for expression in date_formats(day_first):
        with compat_contextlib_suppress(ValueError):
            upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
    if upload_date is None:
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            with compat_contextlib_suppress(ValueError):
                upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
    if upload_date is not None:
        return compat_str(upload_date)
Return a datetime object from a string in the format YYYYMMDD or (now|today)[+-][0-9](day|week|month|year)(s)?
def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(r'(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is not None:
        sign = match.group('sign')
        time = int(match.group('time'))
        if sign == '-':
            time = -time
        unit = match.group('unit')
        # A bad approximation?
        if unit == 'month':
            unit = 'day'
            time *= 30
        elif unit == 'year':
            unit = 'day'
            time *= 365
        unit += 's'
        delta = datetime.timedelta(**{unit: time})
        return today + delta
    return datetime.datetime.strptime(date_str, '%Y%m%d').date()
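For example, relative dates resolve against today while absolute dates are parsed strictly:

date_from_str('now-1week')  # -> datetime.date.today() - datetime.timedelta(days=7)
date_from_str('20240229')   # -> datetime.date(2024, 2, 29)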
Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format
def hyphenate_date(date_str):
    """ Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is not None:
        return '-'.join(match.groups())
    else:
        return date_str
Returns the platform name as a compat_str
def platform_name():
    """ Returns the platform name as a compat_str """
    res = platform.platform()
    return _decode_compat_str(res)
Returns True if the string was written using special methods, False if it has yet to be written out.
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070

    import ctypes
    import ctypes.wintypes

    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }

    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False

    GetStdHandle = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        ('GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])

    WriteConsoleW = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)

    GetFileType = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(
        ('GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        ('GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value

    def not_a_console(handle):
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
                or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)

    if not_a_console(h):
        return False

    def next_nonbmp_pos(s):
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)

    while s:
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:
            # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True
Pass additional data in a URL for internal use.
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """

    url, idata = unsmuggle_url(url, {})
    data.update(idata)
    sdata = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + sdata
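The payload travels in the URL fragment; its counterpart unsmuggle_url (defined alongside it, and already called above) recovers the original URL and data:

url = smuggle_url('https://example.com/video', {'referer': 'https://example.com/'})
# url == 'https://example.com/video#__youtubedl_smuggle=...'
unsmuggle_url(url)
# -> ('https://example.com/video', {'referer': 'https://example.com/'})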
Return the number of a month by (locale-independently) English name
def month_by_name(name, lang='en'):
    """ Return the number of a month by (locale-independently) English name """

    month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])

    try:
        return month_names.index(name) + 1
    except ValueError:
        return None
Return the number of a month by (locale-independently) English abbreviations
def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
        abbreviations """

    try:
        return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
    except ValueError:
        return None
Replace all the '&' by '&amp;' in XML
def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)
A more relaxed version of int_or_none
def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if isinstance(int_str, compat_integer_types):
        return int_str
    elif isinstance(int_str, compat_str):
        int_str = re.sub(r'[,\.\+]', '', int_str)
    return int_or_none(int_str)
Combine str/strip_or_none, disallow blank value (for traverse_obj)
def txt_or_none(v, default=None):
    """ Combine str/strip_or_none, disallow blank value (for traverse_obj) """
    return default if v is None else (compat_str(v).strip() or default)
Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version)
def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        process_communicate_or_kill(subprocess.Popen(
            [exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
    except OSError:
        return False
    return exe
Returns the version of the specified executable, or False if the executable is not present
def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if youtube-dl is run in the background.
        # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
        out, _ = process_communicate_or_kill(subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
    except OSError:
        return False
    out = _decode_compat_str(out, 'ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)
Escape non-ASCII characters as suggested by RFC 3986
def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    if sys.version_info < (3, 0):
        s = _encode_compat_str(s, 'utf-8')
    # ensure unicode: after quoting, it can always be converted
    return compat_str(compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]"))
Escape URL as suggested by RFC 3986
def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    url_parsed = compat_urllib_parse_urlparse(url)
    return url_parsed._replace(
        netloc=url_parsed.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(url_parsed.path),
        params=escape_rfc3986(url_parsed.params),
        query=escape_rfc3986(url_parsed.query),
        fragment=escape_rfc3986(url_parsed.fragment)
    ).geturl()
Replace URL components specified by kwargs.

url: compat_str or parsed URL tuple
If query_update is in kwargs, update query with its value instead of replacing (overrides any `query`).
NB: query_update expects parse_qs() format: {key: value_list, ...}
returns: compat_str
def update_url(url, **kwargs):
    """Replace URL components specified by kwargs
       url: compat_str or parsed URL tuple
       if query_update is in kwargs, update query with
       its value instead of replacing (overrides any `query`)
       NB: query_update expects parse_qs() format: {key: value_list, ...}
       returns: compat_str
    """
    if not kwargs:
        return compat_urllib_parse.urlunparse(url) if isinstance(url, tuple) else url
    if not isinstance(url, tuple):
        url = compat_urllib_parse.urlparse(url)
    query = kwargs.pop('query_update', None)
    if query:
        qs = compat_parse_qs(url.query)
        qs.update(query)
        kwargs['query'] = compat_urllib_parse_urlencode(qs, True)
    kwargs = compat_kwargs(kwargs)
    return compat_urllib_parse.urlunparse(url._replace(**kwargs))
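For instance, swapping the scheme while merging in a parse_qs-style query:

update_url('http://example.com/watch?v=abc',
           scheme='https', query_update={'t': ['42']})
# -> 'https://example.com/watch?v=abc&t=42'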
Encode a dict to RFC 7578-compliant form-data.

data: A dict where keys and values can be either Unicode or bytes-like objects.
boundary: If specified a Unicode object, it's used as the boundary. Otherwise a random boundary is generated.

Reference: https://tools.ietf.org/html/rfc7578
def multipart_encode(data, boundary=None):
    '''
    Encode a dict to RFC 7578-compliant form-data

    data:
        A dict where keys and values can be either Unicode or bytes-like
        objects.
    boundary:
        If specified a Unicode object, it's used as the boundary. Otherwise
        a random boundary is generated.

    Reference: https://tools.ietf.org/html/rfc7578
    '''
    has_specified_boundary = boundary is not None

    while True:
        if boundary is None:
            boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))

        try:
            out, content_type = _multipart_encode_impl(data, boundary)
            break
        except ValueError:
            if has_specified_boundary:
                raise
            boundary = None

    return out, content_type
Merge the `dict`s in `dicts` using the first valid value for each key.
Normally valid: not None and not an empty string.

Keyword-only args:
unblank: allow empty string if False (default True)
rev: merge dicts in reverse order (default False)

merge_dicts(dct1, dct2, ..., unblank=False, rev=True) matches {**dct1, **dct2, ...}
However, merge_dicts(dct1, dct2, ..., rev=True) may often be better.
def merge_dicts(*dicts, **kwargs):
    """
    Merge the `dict`s in `dicts` using the first valid value for each key.
    Normally valid: not None and not an empty string

    Keyword-only args:
    unblank: allow empty string if False (default True)
    rev: merge dicts in reverse order (default False)

    merge_dicts(dct1, dct2, ..., unblank=False, rev=True)
    matches {**dct1, **dct2, ...}

    However, merge_dicts(dct1, dct2, ..., rev=True) may often be better.
    """
    unblank = kwargs.get('unblank', True)
    rev = kwargs.get('rev', False)

    if unblank:
        def can_merge_str(k, v, to_dict):
            return (isinstance(v, compat_str) and v
                    and isinstance(to_dict[k], compat_str)
                    and not to_dict[k])
    else:
        can_merge_str = lambda k, v, to_dict: False

    merged = {}
    for a_dict in reversed(dicts) if rev else dicts:
        for k, v in a_dict.items():
            if v is None:
                continue
            if (k not in merged) or can_merge_str(k, v, merged):
                merged[k] = v
    return merged
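A small illustration of the default (unblank) behaviour:

merge_dicts({'a': None, 'b': ''}, {'a': 1, 'b': 'x'})
# -> {'b': 'x', 'a': 1}: None never wins, and the blank 'b' from the
#    first dict is overridden by the non-blank string from the second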
Get a numeric quality value out of a list of possible values
def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q
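Extractors typically build the ranking callable once and reuse it when scoring formats:

quality = qualities(['small', 'medium', 'hd720'])
quality('hd720')   # -> 2 (best)
quality('medium')  # -> 1
quality('4k')      # -> -1 (unknown id)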
Add ellipses to overly long strings
def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    ELLIPSES = '...'
    if len(s) > length:
        return s[:length - len(ELLIPSES)] + ELLIPSES
    return s
Returns whether youtube-dl can be updated with -U
def ytdl_is_updateable():
    """ Returns whether youtube-dl can be updated with -U """
    from zipimport import zipimporter
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
Returns True iff the content should be blocked
def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    if age_limit is None:  # No limit set
        return False
    if content_limit is None:
        return False  # Content available for everyone
    return age_limit < content_limit
Detect whether a file contains HTML by examining its first bytes.
def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """
    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            s = first_bytes[len(bom):].decode(enc, 'replace')
            break
    else:
        s = first_bytes.decode('utf-8', 'replace')

    return re.match(r'^\s*<', s)
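For example (the return value is a match object or None, hence the bool() here):

bool(is_html(b'\xef\xbb\xbf<!DOCTYPE html>'))  # True: the UTF-8 BOM is skipped
bool(is_html(b'\x00\x00\x01\xba'))             # False: MPEG-PS magic bytes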
Render a list of rows, each as a list of values
def render_table(header_row, data):
    """ Render a list of rows, each as a list of values """
    table = [header_row] + data
    max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
    format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
    return '\n'.join(format_str % tuple(row) for row in table)
Filter a dictionary with a simple string syntax. Returns True (=passes filter) or False
def match_str(filter_str, dct):
    """ Filter a dictionary with a simple string syntax.
    Returns True (=passes filter) or False """
    return all(
        _match_one(filter_part, dct) for filter_part in filter_str.split('&'))
@param dfxp_data A bytes-like object containing DFXP data
@returns A unicode object containing converted SRT data
def dfxp2srt(dfxp_data):
    '''
    @param dfxp_data A bytes-like object containing DFXP data
    @returns A unicode object containing converted SRT data
    '''
    LEGACY_NAMESPACES = (
        (b'http://www.w3.org/ns/ttml', [
            b'http://www.w3.org/2004/11/ttaf1',
            b'http://www.w3.org/2006/04/ttaf1',
            b'http://www.w3.org/2006/10/ttaf1',
        ]),
        (b'http://www.w3.org/ns/ttml#styling', [
            b'http://www.w3.org/ns/ttml#style',
        ]),
    )

    SUPPORTED_STYLING = [
        'color',
        'fontFamily',
        'fontSize',
        'fontStyle',
        'fontWeight',
        'textDecoration'
    ]

    _x = functools.partial(xpath_with_ns, ns_map={
        'xml': 'http://www.w3.org/XML/1998/namespace',
        'ttml': 'http://www.w3.org/ns/ttml',
        'tts': 'http://www.w3.org/ns/ttml#styling',
    })

    styles = {}
    default_style = {}

    class TTMLPElementParser(object):
        _out = ''
        _unclosed_elements = []
        _applied_styles = []

        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), 'br'):
                self._out += '\n'
            else:
                unclosed_elements = []
                style = {}
                element_style_id = attrib.get('style')
                if default_style:
                    style.update(default_style)
                if element_style_id:
                    style.update(styles.get(element_style_id, {}))
                for prop in SUPPORTED_STYLING:
                    prop_val = attrib.get(_x('tts:' + prop))
                    if prop_val:
                        style[prop] = prop_val
                if style:
                    font = ''
                    for k, v in sorted(style.items()):
                        if self._applied_styles and self._applied_styles[-1].get(k) == v:
                            continue
                        if k == 'color':
                            font += ' color="%s"' % v
                        elif k == 'fontSize':
                            font += ' size="%s"' % v
                        elif k == 'fontFamily':
                            font += ' face="%s"' % v
                        elif k == 'fontWeight' and v == 'bold':
                            self._out += '<b>'
                            unclosed_elements.append('b')
                        elif k == 'fontStyle' and v == 'italic':
                            self._out += '<i>'
                            unclosed_elements.append('i')
                        elif k == 'textDecoration' and v == 'underline':
                            self._out += '<u>'
                            unclosed_elements.append('u')
                    if font:
                        self._out += '<font' + font + '>'
                        unclosed_elements.append('font')
                    applied_style = {}
                    if self._applied_styles:
                        applied_style.update(self._applied_styles[-1])
                    applied_style.update(style)
                    self._applied_styles.append(applied_style)
                self._unclosed_elements.append(unclosed_elements)

        def end(self, tag):
            if tag not in (_x('ttml:br'), 'br'):
                unclosed_elements = self._unclosed_elements.pop()
                for element in reversed(unclosed_elements):
                    self._out += '</%s>' % element
                if unclosed_elements and self._applied_styles:
                    self._applied_styles.pop()

        def data(self, data):
            self._out += data

        def close(self):
            return self._out.strip()

    def parse_node(node):
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()

    for k, v in LEGACY_NAMESPACES:
        for ns in v:
            dfxp_data = dfxp_data.replace(ns, k)

    dfxp = compat_etree_fromstring(dfxp_data)
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')

    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')

    repeat = False
    while True:
        for style in dfxp.findall(_x('.//ttml:style')):
            style_id = style.get('id') or style.get(_x('xml:id'))
            if not style_id:
                continue
            parent_style_id = style.get('style')
            if parent_style_id:
                if parent_style_id not in styles:
                    repeat = True
                    continue
                styles[style_id] = styles[parent_style_id].copy()
            for prop in SUPPORTED_STYLING:
                prop_val = style.get(_x('tts:' + prop))
                if prop_val:
                    styles.setdefault(style_id, {})[prop] = prop_val
        if repeat:
            repeat = False
        else:
            break

    for p in ('body', 'div'):
        ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
        if ele is None:
            continue
        style = styles.get(ele.get('style'))
        if not style:
            continue
        default_style.update(style)

    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            continue
        if not end_time:
            if not dur:
                continue
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))

    return ''.join(out)
long_to_bytes(n:long, blocksize:int) : string

Convert a long integer to a byte string. If optional blocksize is given and greater than zero, pad the front of the byte string with binary zeros so that the length is a multiple of blocksize.
def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string.

    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b''
    n = int(n)
    while n > 0:
        s = compat_struct_pack('>I', n & 0xffffffff) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b'\000'[0]:
            break
    else:
        # only happens when n == 0
        s = b'\000'
        i = 0
    s = s[i:]
    # add back some pad bytes. this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s
bytes_to_long(string) : long

Convert a byte string to a long integer. This is (essentially) the inverse of long_to_bytes().
def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = b'\000' * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
    return acc
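The two functions round-trip, with blocksize controlling the front padding:

n = 0xdeadbeefcafe
long_to_bytes(n)               # -> b'\xde\xad\xbe\xef\xca\xfe'
long_to_bytes(n, blocksize=8)  # -> b'\x00\x00\xde\xad\xbe\xef\xca\xfe'
bytes_to_long(long_to_bytes(n)) == n  # True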
Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

Input:
    data: data to encrypt, bytes-like object
    exponent, modulus: parameter e and N of RSA algorithm, both integer
Output: hex string of encrypted data

Limitation: supports one block encryption only
def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''
    payload = int(binascii.hexlify(data[::-1]), 16)
    encrypted = pow(payload, exponent, modulus)
    return '%x' % encrypted
Padding input data with PKCS#1 scheme

@param {int[]} data     input data
@param {int} length     target length
@returns {int[]}        padded data
def pkcs1pad(data, length):
    """
    Padding input data with PKCS#1 scheme

    @param {int[]} data        input data
    @param {int}   length      target length
    @returns {int[]}           padded data
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data
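The layout is the classic EB = 00 || 02 || PS || 00 || D, so for example:

padded = pkcs1pad([1, 2, 3], 16)
len(padded)   # -> 16
padded[:2]    # -> [0, 2]
padded[-4:]   # -> [0, 1, 2, 3] (separator byte, then the data)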
Safely traverse nested `dict`s and `Iterable`s

>>> obj = [{}, {"key": "value"}]
>>> traverse_obj(obj, (1, "key"))
"value"

Each of the provided `paths` is tested and the first producing a valid result will be returned.
The next path will also be tested if the path branched but no results could be found.
Supported values for traversal are `Mapping`, `Iterable` and `re.Match`.
Unhelpful values (`{}`, `None`) are treated as the absence of a value and discarded.

The paths will be wrapped in `variadic`, so that `'key'` is conveniently the same as `('key', )`.

The keys in the path can be one of:
- `None`: Return the current object.
- `set`: Requires the only item in the set to be a type or function, like `{type}`/`{func}`. If a `type`, returns only values of this type. If a function, returns `func(obj)`.
- `str`/`int`: Return `obj[key]`. For `re.Match`, return `obj.group(key)`.
- `slice`: Branch out and return all values in `obj[key]`.
- `Ellipsis`: Branch out and return a list of all values.
- `tuple`/`list`: Branch out and return a list of all matching values. Read as: `[traverse_obj(obj, branch) for branch in branches]`.
- `function`: Branch out and return values filtered by the function. Read as: `[value for key, value in obj if function(key, value)]`. For `Sequence`s, `key` is the index of the value. For `Iterable`s, `key` is the enumeration count of the value. For `re.Match`es, `key` is the group number (0 = full match) as well as additionally any group names, if given.
- `dict`: Transform the current object and return a matching dict. Read as: `{key: traverse_obj(obj, path) for key, path in dct.items()}`.

`tuple`, `list`, and `dict` all support nested paths and branches.

@params paths           Paths which to traverse by.

Keyword arguments:
@param default          Value to return if the paths do not match. If the last key in the path is a `dict`, it will apply to each value inside the dict instead, depth first. Try to avoid if using nested `dict` keys.
@param expected_type    If a `type`, only accept final values of this type. If any other callable, try to call the function on each result. If the last key in the path is a `dict`, it will apply to each value inside the dict instead, recursively. This does respect branching paths.
@param get_all          If `False`, return the first matching result, otherwise all matching ones.
@param casesense        If `False`, consider string dictionary keys as case insensitive.

The following are only meant to be used by YoutubeDL.prepare_outtmpl and are not part of the API:
@param _is_user_input   Whether the keys are generated from user input. If `True` strings get converted to `int`/`slice` if needed.
@param _traverse_string Whether to traverse into objects as strings. If `True`, any non-compatible object will first be converted into a string and then traversed into. The return value of that path will be a string instead, not respecting any further branching.

@returns                The result of the object traversal. If successful, `get_all=True`, and the path branches at least once, then a list of results is returned instead. A list is always returned if the last path branches and no `default` is given. If a path ends on a `dict` that result will always be a `dict`.
def traverse_obj(obj, *paths, **kwargs):
    """
    Safely traverse nested `dict`s and `Iterable`s (see the description above
    for the full path grammar and keyword arguments)
    """
    # parameter defaults
    default = kwargs.get('default', NO_DEFAULT)
    expected_type = kwargs.get('expected_type')
    get_all = kwargs.get('get_all', True)
    casesense = kwargs.get('casesense', True)
    _is_user_input = kwargs.get('_is_user_input', False)
    _traverse_string = kwargs.get('_traverse_string', False)

    # instant compat
    str = compat_str

    casefold = lambda k: compat_casefold(k) if isinstance(k, str) else k

    if isinstance(expected_type, type):
        type_test = lambda val: val if isinstance(val, expected_type) else None
    else:
        type_test = lambda val: try_call(expected_type or IDENTITY, args=(val,))

    def lookup_or_none(v, k, getter=None):
        try:
            return getter(v, k) if getter else v[k]
        except IndexError:
            return None

    def from_iterable(iterables):
        # chain.from_iterable(['ABC', 'DEF']) --> A B C D E F
        for it in iterables:
            for item in it:
                yield item

    def apply_key(key, obj, is_last):
        branching = False

        if obj is None and _traverse_string:
            if key is Ellipsis or callable(key) or isinstance(key, slice):
                branching = True
                result = ()
            else:
                result = None
        elif key is None:
            result = obj
        elif isinstance(key, set):
            assert len(key) == 1, 'Set should only be used to wrap a single item'
            item = next(iter(key))
            if isinstance(item, type):
                result = obj if isinstance(obj, item) else None
            else:
                result = try_call(item, args=(obj,))
        elif isinstance(key, (list, tuple)):
            branching = True
            result = from_iterable(
                apply_path(obj, branch, is_last)[0] for branch in key)
        elif key is Ellipsis:
            branching = True
            if isinstance(obj, compat_collections_abc.Mapping):
                result = obj.values()
            elif is_iterable_like(obj):
                result = obj
            elif isinstance(obj, compat_re_Match):
                result = obj.groups()
            elif _traverse_string:
                branching = False
                result = str(obj)
            else:
                result = ()
        elif callable(key):
            branching = True
            if isinstance(obj, compat_collections_abc.Mapping):
                iter_obj = obj.items()
            elif is_iterable_like(obj):
                iter_obj = enumerate(obj)
            elif isinstance(obj, compat_re_Match):
                iter_obj = itertools.chain(
                    enumerate(itertools.chain((obj.group(),), obj.groups())),
                    obj.groupdict().items())
            elif _traverse_string:
                branching = False
                iter_obj = enumerate(str(obj))
            else:
                iter_obj = ()
            result = (v for k, v in iter_obj if try_call(key, args=(k, v)))
            if not branching:  # string traversal
                result = ''.join(result)
        elif isinstance(key, dict):
            iter_obj = ((k, _traverse_obj(obj, v, False, is_last)) for k, v in key.items())
            result = dict(
                (k, v if v is not None else default) for k, v in iter_obj
                if v is not None or default is not NO_DEFAULT) or None
        elif isinstance(obj, compat_collections_abc.Mapping):
            result = (try_call(obj.get, args=(key,))
                      if casesense or try_call(obj.__contains__, args=(key,))
                      else next((v for k, v in obj.items() if casefold(k) == key), None))
        elif isinstance(obj, compat_re_Match):
            result = None
            if isinstance(key, int) or casesense:
                # Py 2.6 doesn't have methods in the Match class/type
                result = lookup_or_none(obj, key, getter=lambda _, k: obj.group(k))
            elif isinstance(key, str):
                result = next((
                    v for k, v in obj.groupdict().items()
                    if casefold(k) == key), None)
        else:
            result = None
            if isinstance(key, (int, slice)):
                if is_iterable_like(obj, compat_collections_abc.Sequence):
                    branching = isinstance(key, slice)
                    result = lookup_or_none(obj, key)
                elif _traverse_string:
                    result = lookup_or_none(str(obj), key)

        return branching, result if branching else (result,)

    def lazy_last(iterable):
        iterator = iter(iterable)
        prev = next(iterator, NO_DEFAULT)
        if prev is NO_DEFAULT:
            return

        for item in iterator:
            yield False, prev
            prev = item

        yield True, prev

    def apply_path(start_obj, path, test_type):
        objs = (start_obj,)
        has_branched = False

        key = None
        for last, key in lazy_last(variadic(path, (str, bytes, dict, set))):
            if _is_user_input and isinstance(key, str):
                if key == ':':
                    key = Ellipsis
                elif ':' in key:
                    key = slice(*map(int_or_none, key.split(':')))
                elif int_or_none(key) is not None:
                    key = int(key)

            if not casesense and isinstance(key, str):
                key = compat_casefold(key)

            if __debug__ and callable(key):
                # Verify function signature
                _try_bind_args(key, None, None)

            new_objs = []
            for obj in objs:
                branching, results = apply_key(key, obj, last)
                has_branched |= branching
                new_objs.append(results)

            objs = from_iterable(new_objs)

        if test_type and not isinstance(key, (dict, list, tuple)):
            objs = map(type_test, objs)

        return objs, has_branched, isinstance(key, dict)

    def _traverse_obj(obj, path, allow_empty, test_type):
        results, has_branched, is_dict = apply_path(obj, path, test_type)
        results = LazyList(x for x in results if x not in (None, {}))

        if get_all and has_branched:
            if results:
                return results.exhaust()
            if allow_empty:
                return [] if default is NO_DEFAULT else default
            return None

        return results[0] if results else {} if allow_empty and is_dict else None

    for index, path in enumerate(paths, 1):
        result = _traverse_obj(obj, path, index == len(paths), True)
        if result is not None:
            return result

    return None if default is NO_DEFAULT else default
For use in yt-dl instead of {type} or set((type,))
def T(x):
    """ For use in yt-dl instead of {type} or set((type,)) """
    return set((x,))
Given the name of the executable, see whether we support the given downloader.
def get_external_downloader(external_downloader):
    """ Given the name of the executable, see whether we support the
    given downloader. """
    # Drop .exe extension on Windows
    bn = os.path.splitext(os.path.basename(external_downloader))[0]
    return _BY_NAME[bn]
Return a list of (segment, fragment) for each fragment in the video
def build_fragments_list(boot_info):
    """ Return a list of (segment, fragment) for each fragment in the video """
    res = []
    segment_run_table = boot_info['segments'][0]
    fragment_run_entry_table = boot_info['fragments'][0]['fragments']
    first_frag_number = fragment_run_entry_table[0]['first']
    fragments_counter = itertools.count(first_frag_number)
    for segment, fragments_count in segment_run_table['segment_run']:
        # In some live HDS streams (for example Rai), `fragments_count` is
        # abnormal and causing out-of-memory errors. It's OK to change the
        # number of fragments for live streams as they are updated periodically
        if fragments_count == 4294967295 and boot_info['live']:
            fragments_count = 2
        for _ in range(fragments_count):
            res.append((segment, next(fragments_counter)))

    if boot_info['live']:
        res = res[-2:]

    return res
Writes the FLV header to stream
def write_flv_header(stream):
    """Writes the FLV header to stream"""
    # FLV header
    stream.write(b'FLV\x01')           # signature + version 1
    stream.write(b'\x05')              # flags: audio and video present
    stream.write(b'\x00\x00\x00\x09')  # header size (9 bytes)
    stream.write(b'\x00\x00\x00\x00')  # PreviousTagSize0
Writes optional metadata tag to stream
def write_metadata_tag(stream, metadata):
    """Writes optional metadata tag to stream"""
    SCRIPT_TAG = b'\x12'
    FLV_TAG_HEADER_LEN = 11

    if metadata:
        stream.write(SCRIPT_TAG)
        write_unsigned_int_24(stream, len(metadata))
        stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
        stream.write(metadata)
        write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))
Get the downloader class that can handle the info dict.
def _get_suitable_downloader(info_dict, params={}):
    """Get the downloader class that can handle the info dict."""

    # if (info_dict.get('start_time') or info_dict.get('end_time')) and not info_dict.get('requested_formats') and FFmpegFD.can_download(info_dict):
    #     return FFmpegFD

    external_downloader = params.get('external_downloader')
    if external_downloader is not None:
        ed = get_external_downloader(external_downloader)
        if ed.can_download(info_dict):
            return ed
        # Avoid using unwanted args since external_downloader was rejected
        if params.get('external_downloader_args'):
            params['external_downloader_args'] = None

    protocol = info_dict['protocol']
    if protocol.startswith('m3u8') and info_dict.get('is_live'):
        return FFmpegFD

    if protocol == 'm3u8' and params.get('hls_prefer_native') is True:
        return HlsFD

    if protocol == 'm3u8_native' and params.get('hls_prefer_native') is False:
        return FFmpegFD

    return PROTOCOL_MAP.get(protocol, HttpFD)
Return a list of supported extractors. The order does matter; the first extractor matched is the one handling the URL.
def gen_extractor_classes():
    """ Return a list of supported extractors.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    return _ALL_CLASSES
Return a list of an instance of every supported extractor. The order does matter; the first extractor matched is the one handling the URL.
def gen_extractors():
    """ Return a list of an instance of every supported extractor.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    return [klass() for klass in gen_extractor_classes()]
Return a list of extractors that are suitable for the given age, sorted by extractor ID.
def list_extractors(age_limit):
    """
    Return a list of extractors that are suitable for the given age,
    sorted by extractor ID.
    """
    return sorted(
        filter(lambda ie: ie.is_suitable(age_limit), gen_extractors()),
        key=lambda ie: ie.IE_NAME.lower())
Returns the info extractor class with the given ie_name
def get_info_extractor(ie_name):
    """Returns the info extractor class with the given ie_name"""
    return globals()[ie_name + 'IE']
@returns (name, path)
def exe(onedir):
    """@returns (name, path)"""
    name = '_'.join(filter(None, (
        'yt-dlp',
        {'win32': '', 'darwin': 'macos'}.get(OS_NAME, OS_NAME),
        MACHINE,
    )))
    return name, ''.join(filter(None, (
        'dist/',
        onedir and f'{name}/',
        name,
        OS_NAME == 'win32' and '.exe'
    )))
Find the correct sorting and add the required base classes so that subclasses can be correctly created.
def sort_ies(ies, ignored_bases):
    """find the correct sorting and add the required base classes
    so that subclasses can be correctly created"""
    classes, returned_classes = ies[:-1], set()
    assert ies[-1].__name__ == 'GenericIE', 'Last IE must be GenericIE'
    while classes:
        for c in classes[:]:
            bases = set(c.__bases__) - {object, *ignored_bases}
            restart = False
            for b in sorted(bases, key=lambda x: x.__name__):
                if b not in classes and b not in returned_classes:
                    assert b.__name__ != 'GenericIE', 'Cannot inherit from GenericIE'
                    classes.insert(0, b)
                    restart = True
            if restart:
                break
            if bases <= returned_classes:
                yield c
                returned_classes.add(c)
                classes.remove(c)
                break
    yield ies[-1]
Get the version without importing the package
def read_version(fname='yt_dlp/version.py', varname='__version__'):
    """Get the version without importing the package"""
    items = {}
    exec(compile(read_file(fname), fname, 'exec'), items)
    return items[varname]
Remove a file if it exists
def try_rm(filename):
    """ Remove a file if it exists """
    try:
        os.remove(filename)
    except OSError as ose:
        if ose.errno != errno.ENOENT:
            raise
Print the message to stderr; it will be prefixed with 'WARNING:'. If stderr is a tty, the 'WARNING:' prefix will be colored.
def report_warning(message, *args, **kwargs):
    '''
    Print the message to stderr; it will be prefixed with 'WARNING:'.
    If stderr is a tty, the 'WARNING:' prefix will be colored.
    '''
    if sys.stderr.isatty() and compat_os_name != 'nt':
        _msg_header = '\033[0;33mWARNING:\033[0m'
    else:
        _msg_header = 'WARNING:'
    output = f'{_msg_header} {message}\n'
    if 'b' in getattr(sys.stderr, 'mode', ''):
        output = output.encode(preferredencoding())
    sys.stderr.write(output)
Returns True if the file has been downloaded
def _download_restricted(url, filename, age):
    """ Returns True if the file has been downloaded """

    params = {
        'age_limit': age,
        'skip_download': True,
        'writeinfojson': True,
        'outtmpl': '%(id)s.%(ext)s',
    }
    ydl = YoutubeDL(params)
    ydl.add_default_info_extractors()
    json_filename = os.path.splitext(filename)[0] + '.info.json'
    try_rm(json_filename)
    try:
        ydl.download([url])
    except DownloadError:
        pass
    else:
        return os.path.exists(json_filename)
    finally:
        try_rm(json_filename)
PKCS#7 padding

@param {int[]} data cleartext
@returns {int[]} padded data
def pkcs7_padding(data):
    """
    PKCS#7 padding

    @param {int[]} data        cleartext
    @returns {int[]}           padded data
    """
    remaining_length = BLOCK_SIZE_BYTES - len(data) % BLOCK_SIZE_BYTES
    return data + [remaining_length] * remaining_length
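For example, with the module's 16-byte BLOCK_SIZE_BYTES:

pkcs7_padding(list(range(14)))  # -> [0, 1, ..., 13, 2, 2]: two pad bytes of value 2
pkcs7_padding([])               # -> [16] * 16: a full block of padding is always added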
Pad a block with the given padding mode

@param {int[]} block block to pad
@param padding_mode padding mode
def pad_block(block, padding_mode):
    """
    Pad a block with the given padding mode

    @param {int[]} block        block to pad
    @param padding_mode         padding mode
    """
    padding_size = BLOCK_SIZE_BYTES - len(block)
    PADDING_BYTE = {
        'pkcs7': padding_size,
        'iso7816': 0x0,
        'whitespace': 0x20,
        'zero': 0x0,
    }

    if padding_size < 0:
        raise ValueError('Block size exceeded')
    elif padding_mode not in PADDING_BYTE:
        raise NotImplementedError(f'Padding mode {padding_mode} is not implemented')

    if padding_mode == 'iso7816' and padding_size:
        block = block + [0x80]  # NB: += mutates list
        padding_size -= 1

    return block + [PADDING_BYTE[padding_mode]] * padding_size
Encrypt with aes in ECB mode, using PKCS#7 padding

@param {int[]} data cleartext
@param {int[]} key 16/24/32-Byte cipher key
@param {int[]} iv Unused for this mode
@returns {int[]} encrypted data
def aes_ecb_encrypt(data, key, iv=None):
    """
    Encrypt with aes in ECB mode, using PKCS#7 padding

    @param {int[]} data        cleartext
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          Unused for this mode
    @returns {int[]}           encrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    encrypted_data = []
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        encrypted_data += aes_encrypt(pkcs7_padding(block), expanded_key)

    return encrypted_data
Decrypt with aes in ECB mode

@param {int[]} data cipher
@param {int[]} key 16/24/32-Byte cipher key
@param {int[]} iv Unused for this mode
@returns {int[]} decrypted data
def aes_ecb_decrypt(data, key, iv=None):
    """
    Decrypt with aes in ECB mode

    @param {int[]} data        cipher
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          Unused for this mode
    @returns {int[]}           decrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    decrypted_data = []
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        decrypted_data += aes_decrypt(block, expanded_key)
    decrypted_data = decrypted_data[:len(data)]

    return decrypted_data
Decrypt with aes in counter mode

@param {int[]} data cipher
@param {int[]} key 16/24/32-Byte cipher key
@param {int[]} iv 16-Byte initialization vector
@returns {int[]} decrypted data
def aes_ctr_decrypt(data, key, iv):
    """
    Decrypt with aes in counter mode

    @param {int[]} data        cipher
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          16-Byte initialization vector
    @returns {int[]}           decrypted data
    """
    return aes_ctr_encrypt(data, key, iv)
Encrypt with aes in counter mode

@param {int[]} data cleartext
@param {int[]} key 16/24/32-Byte cipher key
@param {int[]} iv 16-Byte initialization vector
@returns {int[]} encrypted data
def aes_ctr_encrypt(data, key, iv):
    """
    Encrypt with aes in counter mode

    @param {int[]} data        cleartext
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          16-Byte initialization vector
    @returns {int[]}           encrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
    counter = iter_vector(iv)

    encrypted_data = []
    for i in range(block_count):
        counter_block = next(counter)
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        block += [0] * (BLOCK_SIZE_BYTES - len(block))

        cipher_counter_block = aes_encrypt(counter_block, expanded_key)
        encrypted_data += xor(block, cipher_counter_block)
    encrypted_data = encrypted_data[:len(data)]

    return encrypted_data
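Because CTR mode only XORs a keystream into the data, decryption is the same operation as encryption; a quick round-trip sketch (assuming the module's int-list conventions for keys and data):

key = list(range(16))  # 16-byte AES-128 key as an int list
iv = [0] * 16
msg = [ord(c) for c in 'attack at dawn']
assert aes_ctr_decrypt(aes_ctr_encrypt(msg, key, iv), key, iv) == msg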
Decrypt with aes in CBC mode

@param {int[]} data cipher
@param {int[]} key 16/24/32-Byte cipher key
@param {int[]} iv 16-Byte IV
@returns {int[]} decrypted data
def aes_cbc_decrypt(data, key, iv):
    """
    Decrypt with aes in CBC mode

    @param {int[]} data        cipher
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          16-Byte IV
    @returns {int[]}           decrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    decrypted_data = []
    previous_cipher_block = iv
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        block += [0] * (BLOCK_SIZE_BYTES - len(block))

        decrypted_block = aes_decrypt(block, expanded_key)
        decrypted_data += xor(decrypted_block, previous_cipher_block)
        previous_cipher_block = block
    decrypted_data = decrypted_data[:len(data)]

    return decrypted_data
Encrypt with aes in CBC mode

@param {int[]} data cleartext
@param {int[]} key 16/24/32-Byte cipher key
@param {int[]} iv 16-Byte IV
@param padding_mode Padding mode to use
@returns {int[]} encrypted data
def aes_cbc_encrypt(data, key, iv, *, padding_mode='pkcs7'):
    """
    Encrypt with aes in CBC mode

    @param {int[]} data        cleartext
    @param {int[]} key         16/24/32-Byte cipher key
    @param {int[]} iv          16-Byte IV
    @param padding_mode        Padding mode to use
    @returns {int[]}           encrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))

    encrypted_data = []
    previous_cipher_block = iv
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        block = pad_block(block, padding_mode)

        mixed_block = xor(block, previous_cipher_block)

        encrypted_block = aes_encrypt(mixed_block, expanded_key)
        encrypted_data += encrypted_block

        previous_cipher_block = encrypted_block

    return encrypted_data
Decrypt with aes in GCM mode and check authenticity using the tag

@param {int[]} data cipher
@param {int[]} key 16-Byte cipher key
@param {int[]} tag authentication tag
@param {int[]} nonce IV (recommended 12-Byte)
@returns {int[]} decrypted data
def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
    """
    Decrypt with aes in GCM mode and check authenticity using the tag

    @param {int[]} data        cipher
    @param {int[]} key         16-Byte cipher key
    @param {int[]} tag         authentication tag
    @param {int[]} nonce       IV (recommended 12-Byte)
    @returns {int[]}           decrypted data
    """

    # XXX: check aes, gcm param

    hash_subkey = aes_encrypt([0] * BLOCK_SIZE_BYTES, key_expansion(key))

    if len(nonce) == 12:
        j0 = nonce + [0, 0, 0, 1]
    else:
        fill = (BLOCK_SIZE_BYTES - (len(nonce) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES + 8
        ghash_in = nonce + [0] * fill + bytes_to_intlist((8 * len(nonce)).to_bytes(8, 'big'))
        j0 = ghash(hash_subkey, ghash_in)

    # TODO: add nonce support to aes_ctr_decrypt
    # nonce_ctr = j0[:12]
    iv_ctr = inc(j0)

    decrypted_data = aes_ctr_decrypt(data, key, iv_ctr + [0] * (BLOCK_SIZE_BYTES - len(iv_ctr)))
    pad_len = len(data) // 16 * 16
    s_tag = ghash(
        hash_subkey,
        data
        + [0] * (BLOCK_SIZE_BYTES - len(data) + pad_len)   # pad
        + bytes_to_intlist((0 * 8).to_bytes(8, 'big')      # length of associated data
                           + ((len(data) * 8).to_bytes(8, 'big'))))  # length of data

    if tag != aes_ctr_encrypt(s_tag, key, j0):
        raise ValueError("Mismatching authentication tag")

    return decrypted_data
Encrypt one block with aes

@param {int[]} data 16-Byte state
@param {int[]} expanded_key 176/208/240-Byte expanded key
@returns {int[]} 16-Byte cipher
def aes_encrypt(data, expanded_key):
    """
    Encrypt one block with aes

    @param {int[]} data          16-Byte state
    @param {int[]} expanded_key  176/208/240-Byte expanded key
    @returns {int[]}             16-Byte cipher
    """
    rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

    data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
    for i in range(1, rounds + 1):
        data = sub_bytes(data)
        data = shift_rows(data)
        if i != rounds:
            data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX))
        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])

    return data
Decrypt one block with aes

@param {int[]} data 16-Byte cipher
@param {int[]} expanded_key 176/208/240-Byte expanded key
@returns {int[]} 16-Byte state
def aes_decrypt(data, expanded_key):
    """
    Decrypt one block with aes

    @param {int[]} data          16-Byte cipher
    @param {int[]} expanded_key  176/208/240-Byte expanded key
    @returns {int[]}             16-Byte state
    """
    rounds = len(expanded_key) // BLOCK_SIZE_BYTES - 1

    for i in range(rounds, 0, -1):
        data = xor(data, expanded_key[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES])
        if i != rounds:
            data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX_INV))
        data = shift_rows_inv(data)
        data = sub_bytes_inv(data)
    data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])

    return data
Decrypt text
- The first 8 Bytes of decoded 'data' are the 8 high Bytes of the counter
- The cipher key is retrieved by encrypting the first 16 Byte of 'password' with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
- Mode of operation is 'counter'

@param {str} data Base64 encoded string
@param {str,unicode} password Password (will be encoded with utf-8)
@param {int} key_size_bytes Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
@returns {str} Decrypted data
def aes_decrypt_text(data, password, key_size_bytes):
    """
    Decrypt text
    - The first 8 Bytes of decoded 'data' are the 8 high Bytes of the counter
    - The cipher key is retrieved by encrypting the first 16 Byte of 'password'
      with the first 'key_size_bytes' Bytes from 'password' (if necessary filled with 0's)
    - Mode of operation is 'counter'

    @param {str} data                    Base64 encoded string
    @param {str,unicode} password        Password (will be encoded with utf-8)
    @param {int} key_size_bytes          Possible values: 16 for 128-Bit, 24 for 192-Bit or 32 for 256-Bit
    @returns {str}                       Decrypted data
    """
    NONCE_LENGTH_BYTES = 8

    data = bytes_to_intlist(base64.b64decode(data))
    password = bytes_to_intlist(password.encode())

    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
    key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)

    nonce = data[:NONCE_LENGTH_BYTES]
    cipher = data[NONCE_LENGTH_BYTES:]

    decrypted_data = aes_ctr_decrypt(cipher, key, nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES))
    plaintext = intlist_to_bytes(decrypted_data)
    return plaintext
Generate key schedule

@param {int[]} data 16/24/32-Byte cipher key
@returns {int[]} 176/208/240-Byte expanded key
def key_expansion(data):
    """
    Generate key schedule

    @param {int[]} data        16/24/32-Byte cipher key
    @returns {int[]}           176/208/240-Byte expanded key
    """
    data = data[:]  # copy
    rcon_iteration = 1
    key_size_bytes = len(data)
    expanded_key_size_bytes = (key_size_bytes // 4 + 7) * BLOCK_SIZE_BYTES

    while len(data) < expanded_key_size_bytes:
        temp = data[-4:]
        temp = key_schedule_core(temp, rcon_iteration)
        rcon_iteration += 1
        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

        for _ in range(3):
            temp = data[-4:]
            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

        if key_size_bytes == 32:
            temp = data[-4:]
            temp = sub_bytes(temp)
            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])

        for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
            temp = data[-4:]
            data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
    data = data[:expanded_key_size_bytes]

    return data
References:
- https://github.com/libyal/dtformats/blob/main/documentation/Safari%20Cookies.asciidoc
  - this data appears to be out of date, but the important parts of the database structure are the same
  - there are a few bytes here and there which are skipped during parsing
def parse_safari_cookies(data, jar=None, logger=YDLLogger()):
    """
    References:
        - https://github.com/libyal/dtformats/blob/main/documentation/Safari%20Cookies.asciidoc
            - this data appears to be out of date, but the important parts
              of the database structure are the same
            - there are a few bytes here and there which are skipped during parsing
    """
    if jar is None:
        jar = YoutubeDLCookieJar()
    page_sizes, body_start = _parse_safari_cookies_header(data, logger)
    p = DataParser(data[body_start:], logger)
    for page_size in page_sizes:
        _parse_safari_cookies_page(p.read_bytes(page_size), jar, logger)
    p.skip_to_end('footer')
    return jar
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.cc
GetDesktopEnvironment
def _get_linux_desktop_environment(env, logger):
    """
    https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.cc
    GetDesktopEnvironment
    """
    xdg_current_desktop = env.get('XDG_CURRENT_DESKTOP', None)
    desktop_session = env.get('DESKTOP_SESSION', None)
    if xdg_current_desktop is not None:
        xdg_current_desktop = xdg_current_desktop.split(':')[0].strip()

        if xdg_current_desktop == 'Unity':
            if desktop_session is not None and 'gnome-fallback' in desktop_session:
                return _LinuxDesktopEnvironment.GNOME
            else:
                return _LinuxDesktopEnvironment.UNITY
        elif xdg_current_desktop == 'Deepin':
            return _LinuxDesktopEnvironment.DEEPIN
        elif xdg_current_desktop == 'GNOME':
            return _LinuxDesktopEnvironment.GNOME
        elif xdg_current_desktop == 'X-Cinnamon':
            return _LinuxDesktopEnvironment.CINNAMON
        elif xdg_current_desktop == 'KDE':
            kde_version = env.get('KDE_SESSION_VERSION', None)
            if kde_version == '5':
                return _LinuxDesktopEnvironment.KDE5
            elif kde_version == '6':
                return _LinuxDesktopEnvironment.KDE6
            elif kde_version == '4':
                return _LinuxDesktopEnvironment.KDE4
            else:
                logger.info(f'unknown KDE version: "{kde_version}". Assuming KDE4')
                return _LinuxDesktopEnvironment.KDE4
        elif xdg_current_desktop == 'Pantheon':
            return _LinuxDesktopEnvironment.PANTHEON
        elif xdg_current_desktop == 'XFCE':
            return _LinuxDesktopEnvironment.XFCE
        elif xdg_current_desktop == 'UKUI':
            return _LinuxDesktopEnvironment.UKUI
        elif xdg_current_desktop == 'LXQt':
            return _LinuxDesktopEnvironment.LXQT
        else:
            logger.info(f'XDG_CURRENT_DESKTOP is set to an unknown value: "{xdg_current_desktop}"')

    elif desktop_session is not None:
        if desktop_session == 'deepin':
            return _LinuxDesktopEnvironment.DEEPIN
        elif desktop_session in ('mate', 'gnome'):
            return _LinuxDesktopEnvironment.GNOME
        elif desktop_session in ('kde4', 'kde-plasma'):
            return _LinuxDesktopEnvironment.KDE4
        elif desktop_session == 'kde':
            if 'KDE_SESSION_VERSION' in env:
                return _LinuxDesktopEnvironment.KDE4
            else:
                return _LinuxDesktopEnvironment.KDE3
        elif 'xfce' in desktop_session or desktop_session == 'xubuntu':
            return _LinuxDesktopEnvironment.XFCE
        elif desktop_session == 'ukui':
            return _LinuxDesktopEnvironment.UKUI
        else:
            logger.info(f'DESKTOP_SESSION is set to an unknown value: "{desktop_session}"')

    else:
        if 'GNOME_DESKTOP_SESSION_ID' in env:
            return _LinuxDesktopEnvironment.GNOME
        elif 'KDE_FULL_SESSION' in env:
            if 'KDE_SESSION_VERSION' in env:
                return _LinuxDesktopEnvironment.KDE4
            else:
                return _LinuxDesktopEnvironment.KDE3

    return _LinuxDesktopEnvironment.OTHER
SelectBackend in [1]

There is currently support for forcing chromium to use BASIC_TEXT by creating a file called
`Disable Local Encryption` [1] in the user data dir. The function to write this file
(`WriteBackendUse()` [1]) does not appear to be called anywhere other than in tests, so the
user would have to create this file manually and so would be aware enough to tell yt-dlp
to use the BASIC_TEXT keyring.

References:
    - [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/key_storage_util_linux.cc
def _choose_linux_keyring(logger):
    """
    SelectBackend in [1]

    There is currently support for forcing chromium to use BASIC_TEXT by creating a file called
    `Disable Local Encryption` [1] in the user data dir. The function to write this file
    (`WriteBackendUse()` [1]) does not appear to be called anywhere other than in tests, so the
    user would have to create this file manually and so would be aware enough to tell yt-dlp
    to use the BASIC_TEXT keyring.

    References:
        - [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/key_storage_util_linux.cc
    """
    desktop_environment = _get_linux_desktop_environment(os.environ, logger)
    logger.debug(f'detected desktop environment: {desktop_environment.name}')
    if desktop_environment == _LinuxDesktopEnvironment.KDE4:
        linux_keyring = _LinuxKeyring.KWALLET
    elif desktop_environment == _LinuxDesktopEnvironment.KDE5:
        linux_keyring = _LinuxKeyring.KWALLET5
    elif desktop_environment == _LinuxDesktopEnvironment.KDE6:
        linux_keyring = _LinuxKeyring.KWALLET6
    elif desktop_environment in (
        _LinuxDesktopEnvironment.KDE3, _LinuxDesktopEnvironment.LXQT, _LinuxDesktopEnvironment.OTHER
    ):
        linux_keyring = _LinuxKeyring.BASICTEXT
    else:
        linux_keyring = _LinuxKeyring.GNOMEKEYRING
    return linux_keyring
The name of the wallet used to store network passwords.

https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/kwallet_dbus.cc
KWalletDBus::NetworkWallet
which does a dbus call to the following function:
https://api.kde.org/frameworks/kwallet/html/classKWallet_1_1Wallet.html
Wallet::NetworkWallet
def _get_kwallet_network_wallet(keyring, logger):
    """ The name of the wallet used to store network passwords.

    https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/kwallet_dbus.cc
    KWalletDBus::NetworkWallet
    which does a dbus call to the following function:
    https://api.kde.org/frameworks/kwallet/html/classKWallet_1_1Wallet.html
    Wallet::NetworkWallet
    """
    default_wallet = 'kdewallet'
    try:
        if keyring == _LinuxKeyring.KWALLET:
            service_name = 'org.kde.kwalletd'
            wallet_path = '/modules/kwalletd'
        elif keyring == _LinuxKeyring.KWALLET5:
            service_name = 'org.kde.kwalletd5'
            wallet_path = '/modules/kwalletd5'
        elif keyring == _LinuxKeyring.KWALLET6:
            service_name = 'org.kde.kwalletd6'
            wallet_path = '/modules/kwalletd6'
        else:
            raise ValueError(keyring)

        stdout, _, returncode = Popen.run([
            'dbus-send', '--session', '--print-reply=literal',
            f'--dest={service_name}',
            wallet_path,
            'org.kde.KWallet.networkWallet'
        ], text=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

        if returncode:
            logger.warning('failed to read NetworkWallet')
            return default_wallet
        else:
            logger.debug(f'NetworkWallet = "{stdout.strip()}"')
            return stdout.strip()
    except Exception as e:
        logger.warning(f'exception while obtaining NetworkWallet: {e}')
        return default_wallet
References:
    - [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/os_crypt_win.cc
def _get_windows_v10_key(browser_root, logger):
    """
    References:
        - [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/os_crypt_win.cc
    """
    path = _newest(_find_files(browser_root, 'Local State', logger))
    if path is None:
        logger.error('could not find local state file')
        return None
    logger.debug(f'Found local state file at "{path}"')
    with open(path, encoding='utf8') as f:
        data = json.load(f)
    try:
        # kOsCryptEncryptedKeyPrefName in [1]
        base64_key = data['os_crypt']['encrypted_key']
    except KeyError:
        logger.error('no encrypted key in Local State')
        return None
    encrypted_key = base64.b64decode(base64_key)
    # kDPAPIKeyPrefix in [1]
    prefix = b'DPAPI'
    if not encrypted_key.startswith(prefix):
        logger.error('invalid key')
        return None
    return _decrypt_windows_dpapi(encrypted_key[len(prefix):], logger)
References:
    - https://docs.microsoft.com/en-us/windows/win32/api/dpapi/nf-dpapi-cryptunprotectdata
def _decrypt_windows_dpapi(ciphertext, logger):
    """
    References:
        - https://docs.microsoft.com/en-us/windows/win32/api/dpapi/nf-dpapi-cryptunprotectdata
    """

    import ctypes
    import ctypes.wintypes

    class DATA_BLOB(ctypes.Structure):
        _fields_ = [('cbData', ctypes.wintypes.DWORD),
                    ('pbData', ctypes.POINTER(ctypes.c_char))]

    buffer = ctypes.create_string_buffer(ciphertext)
    blob_in = DATA_BLOB(ctypes.sizeof(buffer), buffer)
    blob_out = DATA_BLOB()
    ret = ctypes.windll.crypt32.CryptUnprotectData(
        ctypes.byref(blob_in),  # pDataIn
        None,  # ppszDataDescr: human readable description of pDataIn
        None,  # pOptionalEntropy: salt?
        None,  # pvReserved: must be NULL
        None,  # pPromptStruct: information about prompts to display
        0,  # dwFlags
        ctypes.byref(blob_out)  # pDataOut
    )
    if not ret:
        logger.warning('failed to decrypt with DPAPI', only_once=True)
        return None

    result = ctypes.string_at(blob_out.pbData, blob_out.cbData)
    ctypes.windll.kernel32.LocalFree(blob_out.pbData)
    return result
Simulate JS's ternary operator (cndn?if_true:if_false)
def _js_ternary(cndn, if_true=True, if_false=False):
    """Simulate JS's ternary operator (cndn?if_true:if_false)"""
    if cndn in (False, None, 0, '', JS_Undefined):
        return if_false
    with contextlib.suppress(TypeError):
        if math.isnan(cndn):  # NB: NaN cannot be checked by membership
            return if_false
    return if_true
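The membership test plus the separate NaN check reproduce JavaScript truthiness, which differs from Python's for containers. A few illustrative cases:

assert _js_ternary(0, 'yes', 'no') == 'no'             # 0 is falsy, as in Python
assert _js_ternary(float('nan'), 'yes', 'no') == 'no'  # NaN is falsy; membership cannot catch it
assert _js_ternary([], 'yes', 'no') == 'yes'           # an empty array is truthy in JS, unlike Python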
@param f    String representation of formatting to apply in the form:
            [style] [light] font_color [on [light] bg_color]
            E.g. "red", "bold green on light blue"
def format_text(text, f):
    '''
    @param f    String representation of formatting to apply in the form:
                [style] [light] font_color [on [light] bg_color]
                E.g. "red", "bold green on light blue"
    '''
    f = f.upper()
    tokens = f.strip().split()

    bg_color = ''
    if 'ON' in tokens:
        if tokens[-1] == 'ON':
            raise SyntaxError(f'Empty background format specified in {f!r}')
        if tokens[-1] not in _COLORS:
            raise SyntaxError(f'{tokens[-1]} in {f!r} must be a color')
        bg_color = f'4{_COLORS[tokens.pop()]}'
        if tokens[-1] == 'LIGHT':
            bg_color = f'0;10{bg_color[1:]}'
            tokens.pop()
        if tokens[-1] != 'ON':
            raise SyntaxError(f'Invalid format {f.split(" ON ", 1)[1]!r} in {f!r}')
        bg_color = f'\033[{bg_color}m'
        tokens.pop()

    if not tokens:
        fg_color = ''
    elif tokens[-1] not in _COLORS:
        raise SyntaxError(f'{tokens[-1]} in {f!r} must be a color')
    else:
        fg_color = f'3{_COLORS[tokens.pop()]}'
        if tokens and tokens[-1] == 'LIGHT':
            fg_color = f'9{fg_color[1:]}'
            tokens.pop()
        fg_style = tokens.pop() if tokens and tokens[-1] in _TEXT_STYLES else 'NORMAL'
        fg_color = f'\033[{_TEXT_STYLES[fg_style]};{fg_color}m'
        if tokens:
            raise SyntaxError(f'Invalid format {" ".join(tokens)!r} in {f!r}')

    if fg_color or bg_color:
        text = text.replace(CONTROL_SEQUENCES['RESET'], f'{fg_color}{bg_color}')
        return f'{fg_color}{bg_color}{text}{CONTROL_SEQUENCES["RESET"]}'
    else:
        return text
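A hedged usage sketch; the exact escape sequences depend on the _COLORS and _TEXT_STYLES tables, which this assumes follow the standard ANSI SGR codes:

format_text('error', 'bold red')            # e.g. '\033[1;31merror\033[0m'
format_text('note', 'green on light blue')  # foreground colour plus bright background
format_text('plain', '')                    # no tokens: the text is returned unchanged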
@returns (variant, executable_path)
def _get_variant_and_executable_path():
    """@returns (variant, executable_path)"""
    if getattr(sys, 'frozen', False):
        path = sys.executable
        if not hasattr(sys, '_MEIPASS'):
            return 'py2exe', path
        elif sys._MEIPASS == os.path.dirname(path):
            return f'{sys.platform}_dir', path
        elif sys.platform == 'darwin':
            machine = '_legacy' if version_tuple(platform.mac_ver()[0]) < (10, 15) else ''
        else:
            machine = f'_{platform.machine().lower()}'
            # Ref: https://en.wikipedia.org/wiki/Uname#Examples
            if machine[1:] in ('x86', 'x86_64', 'amd64', 'i386', 'i686'):
                machine = '_x86' if platform.architecture()[0][:2] == '32' else ''
        # sys.executable returns a /tmp/ path for staticx builds (linux_static)
        # Ref: https://staticx.readthedocs.io/en/latest/usage.html#run-time-information
        if static_exe_path := os.getenv('STATICX_PROG_PATH'):
            path = static_exe_path
        return f'{remove_end(sys.platform, "32")}{machine}_exe', path

    path = os.path.dirname(__file__)
    if isinstance(__loader__, zipimporter):
        return 'zip', os.path.join(path, '..')
    elif (os.path.basename(sys.argv[0]) in ('__main__.py', '-m')
            and os.path.exists(os.path.join(path, '../.git/HEAD'))):
        return 'source', path
    return 'unknown', path
Update the program file with the latest version from the repository
@returns Whether there was a successful update (No update = False)
def run_update(ydl):
    """Update the program file with the latest version from the repository
    @returns Whether there was a successful update (No update = False)
    """
    return Updater(ydl).update()
Convert a parsed WebVTT timestamp (a re.Match obtained from _REGEX_TS) into an MPEG PES timestamp: a tick counter at 90 kHz resolution.
def _parse_ts(ts):
    """
    Convert a parsed WebVTT timestamp (a re.Match obtained from _REGEX_TS)
    into an MPEG PES timestamp: a tick counter at 90 kHz resolution.
    """
    return 90 * sum(
        int(part or 0) * mult
        for part, mult in zip(ts.groups(), (3600_000, 60_000, 1000, 1)))
Convert an MPEG PES timestamp into a WebVTT timestamp. This will lose sub-millisecond precision.
def _format_ts(ts):
    """
    Convert an MPEG PES timestamp into a WebVTT timestamp.
    This will lose sub-millisecond precision.
    """
    return '%02u:%02u:%02u.%03u' % timetuple_from_msec(int((ts + 45) // 90))
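A worked round-trip to make the 90 kHz arithmetic concrete: '00:01:02.500' is 62,500 ms, i.e. 90 * 62,500 = 5,625,000 PES ticks, and _format_ts rounds back via (ts + 45) // 90:

# _parse_ts of a match for '00:01:02.500' gives 90 * (0*3600_000 + 1*60_000 + 2*1000 + 500)
assert _format_ts(5_625_000) == '00:01:02.500'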
A generator that yields (partially) parsed WebVTT blocks when given a bytes object containing the raw contents of a WebVTT file.
def parse_fragment(frag_content):
    """
    A generator that yields (partially) parsed WebVTT blocks when given
    a bytes object containing the raw contents of a WebVTT file.
    """
    parser = _MatchParser(frag_content.decode())

    yield Magic.parse(parser)

    while not parser.match(_REGEX_EOF):
        if parser.consume(_REGEX_BLANK):
            continue

        block = RegionBlock.parse(parser)
        if block:
            yield block
            continue
        block = StyleBlock.parse(parser)
        if block:
            yield block
            continue
        block = CommentBlock.parse(parser)
        if block:
            yield block  # XXX: or skip
            continue

        break

    while not parser.match(_REGEX_EOF):
        if parser.consume(_REGEX_BLANK):
            continue

        block = CommentBlock.parse(parser)
        if block:
            yield block  # XXX: or skip
            continue
        block = CueBlock.parse(parser)
        if block:
            yield block
            continue

        raise ParseError(parser)
@param verbose -1: quiet, 0: normal, 1: verbose
def get_urls(urls, batchfile, verbose):
    """
    @param verbose      -1: quiet, 0: normal, 1: verbose
    """
    batch_urls = []
    if batchfile is not None:
        try:
            batch_urls = read_batch_urls(
                read_stdin(None if verbose == -1 else 'URLs') if batchfile == '-'
                else open(expand_path(batchfile), encoding='utf-8', errors='ignore'))
            if verbose == 1:
                write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
        except OSError:
            _exit(f'ERROR: batch file {batchfile} could not be read')
    _enc = preferredencoding()
    return [
        url.strip().decode(_enc, 'ignore') if isinstance(url, bytes) else url.strip()
        for url in batch_urls + urls]
@returns ParsedOptions(parser, opts, urls, ydl_opts)
def parse_options(argv=None):
    """@returns ParsedOptions(parser, opts, urls, ydl_opts)"""
    parser, opts, urls = parseOpts(argv)
    urls = get_urls(urls, opts.batchfile, -1 if opts.quiet and not opts.verbose else opts.verbose)

    set_compat_opts(opts)
    try:
        warnings, deprecation_warnings = validate_options(opts)
    except ValueError as err:
        parser.error(f'{err}\n')

    postprocessors = list(get_postprocessors(opts))

    print_only = bool(opts.forceprint) and all(k not in opts.forceprint for k in POSTPROCESS_WHEN[3:])
    any_getting = any(getattr(opts, k) for k in (
        'dumpjson', 'dump_single_json', 'getdescription', 'getduration', 'getfilename',
        'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl'
    ))
    if opts.quiet is None:
        opts.quiet = any_getting or opts.print_json or bool(opts.forceprint)

    playlist_pps = [pp for pp in postprocessors if pp.get('when') == 'playlist']
    write_playlist_infojson = (opts.writeinfojson and not opts.clean_infojson
                               and opts.allow_playlist_files and opts.outtmpl.get('pl_infojson') != '')
    if not any((
        opts.extract_flat,
        opts.dump_single_json,
        opts.forceprint.get('playlist'),
        opts.print_to_file.get('playlist'),
        write_playlist_infojson,
    )):
        if not playlist_pps:
            opts.extract_flat = 'discard'
        elif playlist_pps == [{'key': 'FFmpegConcat', 'only_multi_video': True, 'when': 'playlist'}]:
            opts.extract_flat = 'discard_in_playlist'

    final_ext = (
        opts.recodevideo if opts.recodevideo in FFmpegVideoConvertorPP.SUPPORTED_EXTS
        else opts.remuxvideo if opts.remuxvideo in FFmpegVideoRemuxerPP.SUPPORTED_EXTS
        else opts.audioformat if (opts.extractaudio and opts.audioformat in FFmpegExtractAudioPP.SUPPORTED_EXTS)
        else None)

    return ParsedOptions(parser, opts, urls, {
        'usenetrc': opts.usenetrc,
        'netrc_location': opts.netrc_location,
        'netrc_cmd': opts.netrc_cmd,
        'username': opts.username,
        'password': opts.password,
        'twofactor': opts.twofactor,
        'videopassword': opts.videopassword,
        'ap_mso': opts.ap_mso,
        'ap_username': opts.ap_username,
        'ap_password': opts.ap_password,
        'client_certificate': opts.client_certificate,
        'client_certificate_key': opts.client_certificate_key,
        'client_certificate_password': opts.client_certificate_password,
        'quiet': opts.quiet,
        'no_warnings': opts.no_warnings,
        'forceurl': opts.geturl,
        'forcetitle': opts.gettitle,
        'forceid': opts.getid,
        'forcethumbnail': opts.getthumbnail,
        'forcedescription': opts.getdescription,
        'forceduration': opts.getduration,
        'forcefilename': opts.getfilename,
        'forceformat': opts.getformat,
        'forceprint': opts.forceprint,
        'print_to_file': opts.print_to_file,
        'forcejson': opts.dumpjson or opts.print_json,
        'dump_single_json': opts.dump_single_json,
        'force_write_download_archive': opts.force_write_download_archive,
        'simulate': (print_only or any_getting or None) if opts.simulate is None else opts.simulate,
        'skip_download': opts.skip_download,
        'format': opts.format,
        'allow_unplayable_formats': opts.allow_unplayable_formats,
        'ignore_no_formats_error': opts.ignore_no_formats_error,
        'format_sort': opts.format_sort,
        'format_sort_force': opts.format_sort_force,
        'allow_multiple_video_streams': opts.allow_multiple_video_streams,
        'allow_multiple_audio_streams': opts.allow_multiple_audio_streams,
        'check_formats': opts.check_formats,
        'listformats': opts.listformats,
        'listformats_table': opts.listformats_table,
        'outtmpl': opts.outtmpl,
        'outtmpl_na_placeholder': opts.outtmpl_na_placeholder,
        'paths': opts.paths,
        'autonumber_size': opts.autonumber_size,
        'autonumber_start': opts.autonumber_start,
        'restrictfilenames': opts.restrictfilenames,
        'windowsfilenames': opts.windowsfilenames,
        'ignoreerrors': opts.ignoreerrors,
        'force_generic_extractor': opts.force_generic_extractor,
        'allowed_extractors': opts.allowed_extractors or ['default'],
        'ratelimit': opts.ratelimit,
        'throttledratelimit': opts.throttledratelimit,
        'overwrites': opts.overwrites,
        'retries': opts.retries,
        'file_access_retries': opts.file_access_retries,
        'fragment_retries': opts.fragment_retries,
        'extractor_retries': opts.extractor_retries,
        'retry_sleep_functions': opts.retry_sleep,
        'skip_unavailable_fragments': opts.skip_unavailable_fragments,
        'keep_fragments': opts.keep_fragments,
        'concurrent_fragment_downloads': opts.concurrent_fragment_downloads,
        'buffersize': opts.buffersize,
        'noresizebuffer': opts.noresizebuffer,
        'http_chunk_size': opts.http_chunk_size,
        'continuedl': opts.continue_dl,
        'noprogress': opts.quiet if opts.noprogress is None else opts.noprogress,
        'progress_with_newline': opts.progress_with_newline,
        'progress_template': opts.progress_template,
        'progress_delta': opts.progress_delta,
        'playliststart': opts.playliststart,
        'playlistend': opts.playlistend,
        'playlistreverse': opts.playlist_reverse,
        'playlistrandom': opts.playlist_random,
        'lazy_playlist': opts.lazy_playlist,
        'noplaylist': opts.noplaylist,
        'logtostderr': opts.outtmpl.get('default') == '-',
        'consoletitle': opts.consoletitle,
        'nopart': opts.nopart,
        'updatetime': opts.updatetime,
        'writedescription': opts.writedescription,
        'writeannotations': opts.writeannotations,
        'writeinfojson': opts.writeinfojson,
        'allow_playlist_files': opts.allow_playlist_files,
        'clean_infojson': opts.clean_infojson,
        'getcomments': opts.getcomments,
        'writethumbnail': opts.writethumbnail is True,
        'write_all_thumbnails': opts.writethumbnail == 'all',
        'writelink': opts.writelink,
        'writeurllink': opts.writeurllink,
        'writewebloclink': opts.writewebloclink,
        'writedesktoplink': opts.writedesktoplink,
        'writesubtitles': opts.writesubtitles,
        'writeautomaticsub': opts.writeautomaticsub,
        'allsubtitles': opts.allsubtitles,
        'listsubtitles': opts.listsubtitles,
        'subtitlesformat': opts.subtitlesformat,
        'subtitleslangs': opts.subtitleslangs,
        'matchtitle': decodeOption(opts.matchtitle),
        'rejecttitle': decodeOption(opts.rejecttitle),
        'max_downloads': opts.max_downloads,
        'prefer_free_formats': opts.prefer_free_formats,
        'trim_file_name': opts.trim_file_name,
        'verbose': opts.verbose,
        'dump_intermediate_pages': opts.dump_intermediate_pages,
        'write_pages': opts.write_pages,
        'load_pages': opts.load_pages,
        'test': opts.test,
        'keepvideo': opts.keepvideo,
        'min_filesize': opts.min_filesize,
        'max_filesize': opts.max_filesize,
        'min_views': opts.min_views,
        'max_views': opts.max_views,
        'daterange': opts.date,
        'cachedir': opts.cachedir,
        'youtube_print_sig_code': opts.youtube_print_sig_code,
        'age_limit': opts.age_limit,
        'download_archive': opts.download_archive,
        'break_on_existing': opts.break_on_existing,
        'break_on_reject': opts.break_on_reject,
        'break_per_url': opts.break_per_url,
        'skip_playlist_after_errors': opts.skip_playlist_after_errors,
        'cookiefile': opts.cookiefile,
        'cookiesfrombrowser': opts.cookiesfrombrowser,
        'legacyserverconnect': opts.legacy_server_connect,
        'nocheckcertificate': opts.no_check_certificate,
        'prefer_insecure': opts.prefer_insecure,
        'enable_file_urls': opts.enable_file_urls,
        'http_headers': opts.headers,
        'proxy': opts.proxy,
        'socket_timeout': opts.socket_timeout,
        'bidi_workaround': opts.bidi_workaround,
        'debug_printtraffic': opts.debug_printtraffic,
        'prefer_ffmpeg': opts.prefer_ffmpeg,
        'include_ads': opts.include_ads,
        'default_search': opts.default_search,
        'dynamic_mpd': opts.dynamic_mpd,
        'extractor_args': opts.extractor_args,
        'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
        'youtube_include_hls_manifest': opts.youtube_include_hls_manifest,
        'encoding': opts.encoding,
        'extract_flat': opts.extract_flat,
        'live_from_start': opts.live_from_start,
        'wait_for_video': opts.wait_for_video,
        'mark_watched': opts.mark_watched,
        'merge_output_format': opts.merge_output_format,
        'final_ext': final_ext,
        'postprocessors': postprocessors,
        'fixup': opts.fixup,
        'source_address': opts.source_address,
        'impersonate': opts.impersonate,
        'call_home': opts.call_home,
        'sleep_interval_requests': opts.sleep_interval_requests,
        'sleep_interval': opts.sleep_interval,
        'max_sleep_interval': opts.max_sleep_interval,
        'sleep_interval_subtitles': opts.sleep_interval_subtitles,
        'external_downloader': opts.external_downloader,
        'download_ranges': opts.download_ranges,
        'force_keyframes_at_cuts': opts.force_keyframes_at_cuts,
        'list_thumbnails': opts.list_thumbnails,
        'playlist_items': opts.playlist_items,
        'xattr_set_filesize': opts.xattr_set_filesize,
        'match_filter': opts.match_filter,
        'color': opts.color,
        'ffmpeg_location': opts.ffmpeg_location,
        'hls_prefer_native': opts.hls_prefer_native,
        'hls_use_mpegts': opts.hls_use_mpegts,
        'hls_split_discontinuity': opts.hls_split_discontinuity,
        'external_downloader_args': opts.external_downloader_args,
        'postprocessor_args': opts.postprocessor_args,
        'cn_verification_proxy': opts.cn_verification_proxy,
        'geo_verification_proxy': opts.geo_verification_proxy,
        'geo_bypass': opts.geo_bypass,
        'geo_bypass_country': opts.geo_bypass_country,
        'geo_bypass_ip_block': opts.geo_bypass_ip_block,
        '_warnings': warnings,
        '_deprecation_warnings': deprecation_warnings,
        'compat_opts': opts.compat_opts,
    })
Passthrough parent module into a child module, creating the parent if necessary
def passthrough_module(parent, child, allowed_attributes=(..., ), *, callback=lambda _: None):
    """Passthrough parent module into a child module, creating the parent if necessary"""
    def __getattr__(attr):
        if _is_package(parent):
            with contextlib.suppress(ModuleNotFoundError):
                return importlib.import_module(f'.{attr}', parent.__name__)

        ret = from_child(attr)
        if ret is _NO_ATTRIBUTE:
            raise AttributeError(f'module {parent.__name__} has no attribute {attr}')
        callback(attr)
        return ret

    @functools.lru_cache(maxsize=None)
    def from_child(attr):
        nonlocal child
        if attr not in allowed_attributes:
            if ... not in allowed_attributes or _is_dunder(attr):
                return _NO_ATTRIBUTE

        if isinstance(child, str):
            child = importlib.import_module(child, parent.__name__)

        if _is_package(child):
            with contextlib.suppress(ImportError):
                return passthrough_module(f'{parent.__name__}.{attr}',
                                          importlib.import_module(f'.{attr}', child.__name__))

        with contextlib.suppress(AttributeError):
            return getattr(child, attr)

        return _NO_ATTRIBUTE

    parent = sys.modules.get(parent, types.ModuleType(parent))
    parent.__class__ = EnhancedModule
    parent.__getattr__ = __getattr__
    return parent
Detect format of image (Currently supports jpeg, png, webp, gif only)
Ref: https://github.com/python/cpython/blob/3.10/Lib/imghdr.py
def what(file=None, h=None):
    """Detect format of image (Currently supports jpeg, png, webp, gif only)
    Ref: https://github.com/python/cpython/blob/3.10/Lib/imghdr.py
    """
    if h is None:
        with open(file, 'rb') as f:
            h = f.read(12)
    return next((type_ for type_, test in tests.items() if test(h)), None)
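A minimal sketch of the magic-byte dispatch, assuming the `tests` table checks the standard file signatures:

assert what(h=b'\x89PNG\r\n\x1a\n' + b'\x00' * 4) == 'png'  # 8-byte PNG signature
assert what(h=b'\x00' * 12) is None                         # unrecognised header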
Convert urllib Request to a networking Request
def urllib_req_to_req(urllib_request):
    """Convert urllib Request to a networking Request"""
    from ..networking import Request
    from ..utils.networking import HTTPHeaderDict
    return Request(
        urllib_request.get_full_url(), data=urllib_request.data, method=urllib_request.get_method(),
        headers=HTTPHeaderDict(urllib_request.headers, urllib_request.unredirected_hdrs),
        extensions={'timeout': urllib_request.timeout} if hasattr(urllib_request, 'timeout') else None)
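A sketch of the conversion, runnable only inside the package because of the relative imports, and assuming the networking Request exposes url and data attributes:

import urllib.request

legacy = urllib.request.Request('https://example.com', data=b'payload', headers={'X-Test': '1'})
req = urllib_req_to_req(legacy)
assert req.url == 'https://example.com' and req.data == b'payload'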
Given the name of the executable, see whether we support the given downloader
def get_external_downloader(external_downloader):
    """ Given the name of the executable, see whether we support the given downloader """
    bn = os.path.splitext(os.path.basename(external_downloader))[0]
    return _BY_NAME.get(bn) or next((
        klass for klass in _BY_NAME.values() if klass.EXE_NAME in bn
    ), None)
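Since the lookup keys on the executable's basename, a bare name and a full path resolve identically (a sketch; whether e.g. aria2c is actually registered depends on the contents of _BY_NAME):

assert get_external_downloader('/usr/local/bin/aria2c') is get_external_downloader('aria2c')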
Return a list of (segment, fragment) for each fragment in the video
def build_fragments_list(boot_info):
    """ Return a list of (segment, fragment) for each fragment in the video """
    res = []
    segment_run_table = boot_info['segments'][0]
    fragment_run_entry_table = boot_info['fragments'][0]['fragments']
    first_frag_number = fragment_run_entry_table[0]['first']
    fragments_counter = itertools.count(first_frag_number)
    for segment, fragments_count in segment_run_table['segment_run']:
        # In some live HDS streams (e.g. Rai), `fragments_count` is
        # abnormal and causing out-of-memory errors. It's OK to change the
        # number of fragments for live streams as they are updated periodically
        if fragments_count == 4294967295 and boot_info['live']:
            fragments_count = 2
        for _ in range(fragments_count):
            res.append((segment, next(fragments_counter)))

    if boot_info['live']:
        res = res[-2:]

    return res
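A worked example with a hypothetical minimal boot_info containing only the fields this function reads; note the fragment counter runs consecutively across segments rather than restarting:

boot_info = {
    'live': False,
    'segments': [{'segment_run': [(1, 2), (2, 3)]}],  # (segment number, fragment count)
    'fragments': [{'fragments': [{'first': 1}]}],     # fragment numbering starts at 1
}
assert build_fragments_list(boot_info) == [(1, 1), (1, 2), (2, 3), (2, 4), (2, 5)]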
Writes the FLV header to stream
def write_flv_header(stream):
    """Writes the FLV header to stream"""
    # FLV header
    stream.write(b'FLV\x01')
    stream.write(b'\x05')
    stream.write(b'\x00\x00\x00\x09')
    stream.write(b'\x00\x00\x00\x00')
Writes optional metadata tag to stream
def write_metadata_tag(stream, metadata):
    """Writes optional metadata tag to stream"""
    SCRIPT_TAG = b'\x12'
    FLV_TAG_HEADER_LEN = 11

    if metadata:
        stream.write(SCRIPT_TAG)
        write_unsigned_int_24(stream, len(metadata))
        stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
        stream.write(metadata)
        write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))
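The header bytes are fixed, so the writer is easy to check against an in-memory stream:

import io

buf = io.BytesIO()
write_flv_header(buf)
# 'FLV', version 1, audio+video flags (0x05), 9-byte header size, PreviousTagSize0 = 0
assert buf.getvalue() == b'FLV\x01\x05\x00\x00\x00\x09\x00\x00\x00\x00'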
Get the downloader class that can handle the info dict.
def _get_suitable_downloader(info_dict, protocol, params, default):
    """Get the downloader class that can handle the info dict."""
    if default is NO_DEFAULT:
        default = HttpFD

    if (info_dict.get('section_start') or info_dict.get('section_end')) and FFmpegFD.can_download(info_dict):
        return FFmpegFD

    info_dict['protocol'] = protocol
    downloaders = params.get('external_downloader')
    external_downloader = (
        downloaders if isinstance(downloaders, str) or downloaders is None
        else downloaders.get(shorten_protocol_name(protocol, True), downloaders.get('default')))

    if external_downloader is None:
        if info_dict['to_stdout'] and FFmpegFD.can_merge_formats(info_dict, params):
            return FFmpegFD
    elif external_downloader.lower() != 'native':
        ed = get_external_downloader(external_downloader)
        if ed.can_download(info_dict, external_downloader):
            return ed

    if protocol == 'http_dash_segments':
        if info_dict.get('is_live') and (external_downloader or '').lower() != 'native':
            return FFmpegFD

    if protocol in ('m3u8', 'm3u8_native'):
        if info_dict.get('is_live'):
            return FFmpegFD
        elif (external_downloader or '').lower() == 'native':
            return HlsFD
        elif protocol == 'm3u8_native' and get_suitable_downloader(
                info_dict, params, None, protocol='m3u8_frag_urls', to_stdout=info_dict['to_stdout']):
            return HlsFD
        elif params.get('hls_prefer_native') is True:
            return HlsFD
        elif params.get('hls_prefer_native') is False:
            return FFmpegFD

    return PROTOCOL_MAP.get(protocol, default)
Add a handler for opening URLs, like _download_webpage
def add_opener(ydl, handler):  # FIXME: Create proper API in .networking
    """Add a handler for opening URLs, like _download_webpage"""
    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
    rh = ydl._request_director.handlers['Urllib']
    if 'abematv-license' in rh._SUPPORTED_URL_SCHEMES:
        return
    headers = ydl.params['http_headers'].copy()
    proxies = ydl.proxies.copy()
    clean_proxies(proxies, headers)
    opener = rh._get_instance(cookiejar=ydl.cookiejar, proxies=proxies)
    assert isinstance(opener, urllib.request.OpenerDirector)
    opener.add_handler(handler)
    rh._SUPPORTED_URL_SCHEMES = (*rh._SUPPORTED_URL_SCHEMES, 'abematv-license')
Return a list of re.Match objects for the tags with the specified attribute in the passed HTML document
def _get_elements_by_tag_and_attrib(html, tag=None, attribute=None, value=None, escape_value=True):
    """Return a list of re.Match objects for the tags with the specified attribute in the passed HTML document"""
    if tag is None:
        tag = '[a-zA-Z0-9:._-]+'
    if attribute is None:
        attribute = ''
    else:
        attribute = r'\s+(?P<attribute>%s)' % re.escape(attribute)
    if value is None:
        value = ''
    else:
        value = re.escape(value) if escape_value else value
        value = '=[\'"]?(?P<value>%s)[\'"]?' % value

    retlist = []
    for m in re.finditer(r'''(?xs)
        <(?P<tag>%s)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
         %s%s
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (tag, attribute, value), html):
        retlist.append(m)

    return retlist
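An illustration of the returned match objects, with hypothetical HTML:

html = '<div class="note">first</div> <div class="other">second</div>'
matches = _get_elements_by_tag_and_attrib(html, tag='div', attribute='class', value='note')
assert [m.group('content') for m in matches] == ['first']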
Source: https://stackoverflow.com/questions/24437823/getting-instagram-post-url-from-media-id
def _pk_to_id(id):
    """Source: https://stackoverflow.com/questions/24437823/getting-instagram-post-url-from-media-id"""
    return encode_base_n(int(id.split('_')[0]), table=_ENCODING_CHARS)
Convert a shortcode to a numeric value
def _id_to_pk(shortcode):
    """Convert a shortcode to a numeric value"""
    return decode_base_n(shortcode[:11], table=_ENCODING_CHARS)
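The two functions are inverses over the first 11 characters of a shortcode; a sketch, assuming _ENCODING_CHARS is the 64-character alphabet used by Instagram shortcodes:

pk = '2281265764246176851'  # a hypothetical media pk
assert str(_id_to_pk(_pk_to_id(pk))) == pk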
Return a list of supported extractors. The order does matter; the first extractor matched is the one handling the URL.
def gen_extractor_classes():
    """ Return a list of supported extractors.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    from .extractors import _ALL_CLASSES
    return _ALL_CLASSES
Return a list of an instance of every supported extractor. The order does matter; the first extractor matched is the one handling the URL.
def gen_extractors():
    """ Return a list of an instance of every supported extractor.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    return [klass() for klass in gen_extractor_classes()]
Return a list of extractors that are suitable for the given age, sorted by extractor name
def list_extractor_classes(age_limit=None):
    """Return a list of extractors that are suitable for the given age, sorted by extractor name"""
    from .generic import GenericIE
    yield from sorted(filter(
        lambda ie: ie.is_suitable(age_limit) and ie != GenericIE,
        gen_extractor_classes()), key=lambda ie: ie.IE_NAME.lower())
    yield GenericIE
Return a list of extractor instances that are suitable for the given age, sorted by extractor name
def list_extractors(age_limit=None):
    """Return a list of extractor instances that are suitable for the given age, sorted by extractor name"""
    return [ie() for ie in list_extractor_classes(age_limit)]
Returns the info extractor class with the given ie_name
def get_info_extractor(ie_name):
    """Returns the info extractor class with the given ie_name"""
    from . import extractors
    return getattr(extractors, f'{ie_name}IE')
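The lookup is plain attribute access on the extractors package, so the argument is the class name minus its IE suffix:

YoutubeIE = get_info_extractor('Youtube')  # resolves the YoutubeIE class
assert YoutubeIE.__name__ == 'YoutubeIE'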