'@param key: int the id of the child node. Returns a Deferred object.'
def callChild(self, key, *args, **kw):
return self.childsmanager.callChild(key, *args, **kw)
'@param childname: str the name of the child node. Returns no result.'
def callChildNotForResult(self, childname, *args, **kw):
self.childsmanager.callChildNotForResult(childname, *args, **kw)
''
def __init__(self):
self.netfactory = None
self.root = None
self.webroot = None
self.remote = {}
self.master_remote = None
self.db = None
self.mem = None
self.servername = None
self.remoteportlist = []
''
def config(self, config, servername=None, dbconfig=None, memconfig=None, masterconf=None):
GlobalObject().json_config = config
netport = config.get('netport')
webport = config.get('webport')
rootport = config.get('rootport')
self.remoteportlist = config.get('remoteport', [])
if not servername:
    servername = config.get('name')
logpath = config.get('log')
hasdb = config.get('db')
hasmem = config.get('mem')
app = config.get('app')
cpuid = config.get('cpu')
mreload = config.get('reload')
self.servername = servername
if netport:
    self.netfactory = LiberateFactory()
    netservice = services.CommandService('netservice')
    self.netfactory.addServiceChannel(netservice)
    reactor.listenTCP(netport, self.netfactory)
if webport:
    self.webroot = Flask('servername')
    GlobalObject().webroot = self.webroot
    reactor.listenWSGI(webport, self.webroot)
if rootport:
    self.root = PBRoot()
    rootservice = services.Service('rootservice')
    self.root.addServiceChannel(rootservice)
    reactor.listenTCP(rootport, BilateralFactory(self.root))
for cnf in self.remoteportlist:
    rname = cnf.get('rootname')
    self.remote[rname] = RemoteObject(self.servername)
if hasdb and dbconfig:
    log.msg(str(dbconfig))
    dbpool.initPool(**dbconfig)
if hasmem and memconfig:
    urls = memconfig.get('urls')
    hostname = str(memconfig.get('hostname'))
    mclient.connect(urls, hostname)
if logpath:
    log.addObserver(loogoo(logpath))
log.startLogging(sys.stdout)
if cpuid:
    affinity.set_process_affinity_mask(os.getpid(), cpuid)
GlobalObject().config(netfactory=self.netfactory, root=self.root, remote=self.remote)
if masterconf:
    masterport = masterconf.get('rootport')
    masterhost = masterconf.get('roothost')
    self.master_remote = RemoteObject(servername)
    addr = ('localhost', masterport) if not masterhost else (masterhost, masterport)
    self.master_remote.connect(addr)
    GlobalObject().masterremote = self.master_remote
import admin
if app:
    __import__(app)
if mreload:
    _path_list = mreload.split('.')
    GlobalObject().reloadmodule = __import__(mreload, fromlist=_path_list[:1])
GlobalObject().remote_connect = self.remote_connect
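# A minimal sketch of driving config()/start() above, assuming the
# surrounding server class is exposed by the framework (the name
# `FFServer` and every config value below are hypothetical).
server = FFServer()
json_config = {
    'name': 'gateserver',
    'netport': 10000,
    'webport': 10100,
    'rootport': 10200,
    'remoteport': [{'rootname': 'master', 'rootport': 9999}],
    'log': './gateserver.log',
}
server.config(json_config, servername=json_config['name'])
server.start()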
'Establish an RPC connection.'
def remote_connect(self, rname, rhost):
for cnf in self.remoteportlist:
    _rname = cnf.get('rootname')
    if rname == _rname:
        rport = cnf.get('rootport')
        if not rhost:
            addr = ('localhost', rport)
        else:
            addr = (rhost, rport)
        self.remote[rname].connect(addr)
        break
''
def start(self):
log.msg('[%s] started...' % self.servername)
log.msg('[%s] pid: %s' % (self.servername, os.getpid()))
reactor.run()
''
def config(self, netfactory=None, root=None, remote=None, db=None):
self.netfactory = netfactory
self.root = root
self.remote = remote
self.db = db
'@param url: str the HTTP route path.'
def __init__(self, url, **kw):
self._url = url
self.kw = kw
''
def __call__(self, cls):
if self._url:
    child_name = self._url
else:
    child_name = cls.__name__
return GlobalObject().webroot.route(child_name, **self.kw)(cls)
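# A minimal usage sketch of the route decorator above. It assumes the
# decorator is exposed as `webserviceHandle` and that config() has already
# installed a Flask app on GlobalObject().webroot; the handler below is
# hypothetical.
@webserviceHandle('/status')
def status():
    return 'ok'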
''
def __init__(self, remotename):
self.remotename = remotename
''
def __call__(self, target):
GlobalObject().remote[self.remotename]._reference._service.mapTarget(target)
''
def __init__(self, logpath):
self.file = open(logpath, 'w')
''
def __call__(self, eventDict):
if 'logLevel' in eventDict:
    level = eventDict['logLevel']
elif eventDict['isError']:
    level = 'ERROR'
else:
    level = 'INFO'
text = log.textFromEventDict(eventDict)
if text is None or level != 'ERROR':
    return
nowdate = datetime.datetime.now()
self.file.write('[' + str(nowdate) + ']\n' + str(level) + '\n\t' + text + '\r\n')
self.file.flush()
''
def getHeadlength():
pass
'Add a target to the unDisplay set so calls to it are not logged.'
def addUnDisplayTarget(self, command):
self.unDisplay.add(command)
'Add a target to the service.'
def mapTarget(self, target):
key = target.__name__
if key in self._targets:
    exist_target = self._targets.get(key)
    raise Exception('target [%s] already exists, conflict between %s and %s' % (key, exist_target.__name__, target.__name__))
self._targets[key] = target
'Remove a target from the service.'
def unMapTarget(self, target):
key = target.__name__
if key in self._targets:
    del self._targets[key]
'Remove a target from the service.'
def unMapTargetByKey(self, targetKey):
del self._targets[targetKey]
'Get a target from the service by name.'
def getTarget(self, targetKey):
target = self._targets.get(targetKey, None)
return target
'call Target @param conn: client connection @param targetKey: target ID @param data: client data'
def callTarget(self, targetKey, *args, **kw):
target = self.getTarget(targetKey)
if not target:
    log.err('the command ' + str(targetKey) + ' not found on service')
    return None
if targetKey not in self.unDisplay:
    log.msg('call method %s on service[single]' % target.__name__)
response = target(*args, **kw)
return response
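# A minimal sketch of registering and invoking a named target on the
# service above (the service name and the `echo` handler are hypothetical).
service = Service('myservice')

def echo(data):
    return data

service.mapTarget(echo)
print(service.callTarget('echo', 'hello'))  # logs the call, then returns 'hello'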
'Add a target to the service.'
def mapTarget(self, target):
key = int(target.__name__.split('_')[-1])
if key in self._targets:
    exist_target = self._targets.get(key)
    raise Exception('target [%d] already exists, conflict between %s and %s' % (key, exist_target.__name__, target.__name__))
self._targets[key] = target
'Remove a target from the service.'
def unMapTarget(self, target):
key = int(target.__name__.split('_')[-1])
if key in self._targets:
    del self._targets[key]
'add_argument(dest, ..., name=value, ...) add_argument(option_string, option_string, ..., name=value, ...)'
def add_argument(self, *args, **kwargs):
chars = self.prefix_chars
if not args or (len(args) == 1 and args[0][0] not in chars):
    if args and 'dest' in kwargs:
        raise ValueError('dest supplied twice for positional argument')
    kwargs = self._get_positional_kwargs(*args, **kwargs)
else:
    kwargs = self._get_optional_kwargs(*args, **kwargs)
if 'default' not in kwargs:
    dest = kwargs['dest']
    if dest in self._defaults:
        kwargs['default'] = self._defaults[dest]
    elif self.argument_default is not None:
        kwargs['default'] = self.argument_default
action_class = self._pop_action_class(kwargs)
if not _callable(action_class):
    raise ValueError('unknown action "%s"' % action_class)
action = action_class(**kwargs)
type_func = self._registry_get('type', action.type, action.type)
if not _callable(type_func):
    raise ValueError('%r is not callable' % type_func)
return self._add_action(action)
'error(message: string) Prints a usage message incorporating the message to stderr and exits. If you override this in a subclass, it should not return -- it should either exit or raise an exception.'
def error(self, message):
self.print_usage(_sys.stderr)
self.exit(2, _('%s: error: %s\n') % (self.prog, message))
'Returns a list of subjects available on the website.'
def get_clips(self):
flatList = self._get_clips()
items = []
for (key, group) in groupby(flatList, lambda x: x.category):
    clipsList = list(group)
    items.append(Category(key, len(clipsList)))
return items
':return: List of Channels :rtype: list of ChannelItem'
def get_channels(self):
return get_channels()
':param channelID: :return: List of programs for a Channel ID :rtype: list of ProgramItem'
def get_channel_programs(self, channelID):
return get_channel_programs(channelID)
':param programID: Program ID :param mediaType: Media type - either \'episodes\' or \'clips\' :return: List of media items for the current Program ID and media type :rtype: list of MediaItem'
def get_program_media(self, programID, mediaType):
return get_program_media(programID, mediaType)
'Quality can be one of the following options: "240p LOWEST", "360p LOW", "520p HIGH", "720p HD".'
def get_media_stream_by_media_id(self, quality, programID, mediaType, mediaID):
return get_media_stream_by_media_id(quality, programID, mediaType, mediaID)
'The actual task for the job should be implemented here.'
def run(self):
pass
'Handle return value by appending to the ``self.result`` queue.'
def _return(self, r):
self.result.put(r)
'Does nothing in Python 2.4'
def task_done(self):
pass
'Does nothing in Python 2.4'
def join(self):
pass
'Add another worker to the pool.'
def grow(self):
t = self.worker_factory(self)
t.start()
self._size += 1
'Get rid of one worker from the pool. Raises IndexError if empty.'
def shrink(self):
if self._size <= 0:
    raise IndexError('pool is already empty')
self._size -= 1
self.put(SuicideJob())
'Retire the workers.'
def shutdown(self):
for i in xrange(self.size()):
    self.put(SuicideJob())
'Approximate number of active workers (could be more if a shrinking is in progress).'
def size(self):
return self._size
'Perform a map operation distributed among the workers. Will block until done.'
def map(self, fn, *seq):
results = Queue()
args = zip(*seq)
for arg_tuple in args:  # avoid shadowing the `seq` parameter
    j = SimpleJob(results, fn, arg_tuple)
    self.put(j)
r = []
for i in xrange(len(args)):
    r.append(results.get())
return r
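# A minimal usage sketch of the pool's map(), assuming the surrounding
# class follows the workerpool-style API shown above (the constructor
# signature is an assumption). Note that results are collected in
# completion order, not input order.
pool = WorkerPool(size=4)
squares = pool.map(lambda x: x * x, range(10))
print(squares)
pool.shutdown()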
'DEPRECATED: Use join() instead.'
def wait(self):
self.join()
'Get jobs from the queue and perform them as they arrive.'
def run(self):
while 1:
    job = self.jobs.get()
    try:
        job.run()
        self.jobs.task_done()
    except TerminationNotice:
        self.jobs.task_done()
        break
'Get jobs from the queue and perform them as they arrive.'
def run(self):
while 1:
    job = self.jobs.get()
    try:
        job.run(toolbox=self.toolbox)
        self.jobs.task_done()
    except TerminationNotice:
        self.jobs.task_done()
        break
'Should we redirect and where to? :returns: Truthy redirect location string if we got a redirect status code and valid location. ``None`` if redirect status and no location. ``False`` if not a redirect status code.'
def get_redirect_location(self):
if self.status in [301, 302, 303, 307]:
    return self.headers.get('location')
return False
'Similar to :meth:`httplib.HTTPResponse.read`, but with two additional parameters: ``decode_content`` and ``cache_content``. :param amt: How much of the content to read. If specified, decoding and caching is skipped because we can\'t decode partial content nor does it make sense to cache partial content as the full response. :param decode_content: If True, will attempt to decode the body based on the \'content-encoding\' header. (Overridden if ``amt`` is set.) :param cache_content: If True, will save the returned data such that the same result is returned despite of the state of the underlying file object. This is useful if you want the ``.data`` property to continue working after having ``.read()`` the file object. (Overridden if ``amt`` is set.)'
def read(self, amt=None, decode_content=None, cache_content=False):
content_encoding = self.headers.get('content-encoding')
decoder = self.CONTENT_DECODERS.get(content_encoding)
if decode_content is None:
    decode_content = self._decode_content
if self._fp is None:
    return
try:
    if amt is None:
        data = self._fp.read()
    else:
        return self._fp.read(amt)
    try:
        if decode_content and decoder:
            data = decoder(data)
    except IOError:
        raise HTTPError('Received response with content-encoding: %s, but failed to decode it.' % content_encoding)
    if cache_content:
        self._body = data
    return data
finally:
    if self._original_response and self._original_response.isclosed():
        self.release_conn()
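# A minimal usage sketch of the deferred-read path described above,
# assuming urllib3's public API (consistent with the docstrings in this
# section); the URL is a placeholder.
from urllib3 import PoolManager
http = PoolManager()
r = http.urlopen('GET', 'http://example.com/', preload_content=False)
first_kb = r.read(amt=1024)  # partial read: decoding and caching are skipped
r.release_conn()             # hand the connection back to the pool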
'Given an :class:`httplib.HTTPResponse` instance ``r``, return a corresponding :class:`urllib3.response.HTTPResponse` object. Remaining parameters are passed to the HTTPResponse constructor, along with ``original_response=r``.'
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
headers = {}
for (k, v) in r.getheaders():
    k = k.lower()
    has_value = headers.get(k)
    if has_value:
        v = ', '.join([has_value, v])
    headers[k] = v
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r, headers=headers, status=r.status, version=r.version, reason=r.reason, strict=strict, original_response=r, **response_kw)
'Get a :class:`ConnectionPool` based on the host, port, and scheme. Note that an appropriate ``port`` value is required here to normalize connection pools in our container most effectively.'
def connection_from_host(self, host, port=80, scheme='http'):
pool_key = (scheme, host, port)
pool = self.pools.get(pool_key)
if pool:
    return pool
pool_cls = pool_classes_by_scheme[scheme]
pool = pool_cls(host, port, **self.connection_pool_kw)
self.pools[pool_key] = pool
return pool
'Similar to :func:`urllib3.connectionpool.connection_from_url` but doesn\'t pass any additional parameters to the :class:`urllib3.connectionpool.ConnectionPool` constructor. Additional parameters are taken from the :class:`.PoolManager` constructor.'
def connection_from_url(self, url):
(scheme, host, port) = get_host(url)
port = port or port_by_scheme.get(scheme, 80)
return self.connection_from_host(host, port=port, scheme=scheme)
'Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`. ``url`` must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.'
def urlopen(self, method, url, **kw):
conn = self.connection_from_url(url)
try:
    return conn.urlopen(method, url, **kw)
except HostChangedError as e:
    kw['retries'] = e.retries
    return self.urlopen(method, e.url, **kw)
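# A minimal usage sketch of the manager-level urlopen above: the manager
# picks (or creates) the right pool for the URL's scheme/host/port.
# Assumes urllib3's PoolManager, consistent with the docstrings here.
from urllib3 import PoolManager
http = PoolManager(num_pools=10)
r = http.urlopen('GET', 'http://example.com/')
print(r.status, r.headers.get('content-type'))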
'Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute.'
def urlopen(self, method, url, **kw):
kw['assert_same_host'] = False
kw['headers'] = self._set_proxy_headers(kw.get('headers'))
return self.proxy_pool.urlopen(method, url, **kw)
'If exists: Invalidate old entry and return it.'
def _invalidate_entry(self, key):
old_entry = self.access_lookup.get(key)
if old_entry:
    old_entry.is_valid = False
return old_entry
'Push entry onto our access log, invalidate the old entry if exists.'
def _push_entry(self, key):
self._invalidate_entry(key)
new_entry = AccessEntry(key)
self.access_lookup[key] = new_entry
self.access_log_lock.acquire()
self.access_log.appendleft(new_entry)
self.access_log_lock.release()
'Pop entries from our access log until we popped ``num`` valid ones.'
def _prune_entries(self, num):
while num > 0:
    self.access_log_lock.acquire()
    p = self.access_log.pop()
    self.access_log_lock.release()
    if not p.is_valid:
        continue
    dict.pop(self, p.key, None)
    self.access_lookup.pop(p.key, None)
    num -= 1
'Rebuild our access_log without the invalidated entries.'
def _prune_invalidated_entries(self):
self.access_log_lock.acquire()
self.access_log = deque(e for e in self.access_log if e.is_valid)
self.access_log_lock.release()
'Return ordered access keys for inspection. Used for testing.'
def _get_ordered_access_keys(self):
self.access_log_lock.acquire()
r = [e.key for e in self.access_log if e.is_valid]
self.access_log_lock.release()
return r
'Return a fresh :class:`httplib.HTTPConnection`.'
def _new_conn(self):
self.num_connections += 1
log.info('Starting new HTTP connection (%d): %s' % (self.num_connections, self.host))
return HTTPConnection(host=self.host, port=self.port)
'Get a connection. Will return a pooled connection if one is available. If no connections are available and :prop:`.block` is ``False``, then a fresh connection is returned. :param timeout: Seconds to wait before giving up and raising :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and :prop:`.block` is ``True``.'
def _get_conn(self, timeout=None):
conn = None
try:
    conn = self.pool.get(block=self.block, timeout=timeout)
    if conn and is_connection_dropped(conn):
        log.info('Resetting dropped connection: %s' % self.host)
        conn.close()
except Empty:
    if self.block:
        raise EmptyPoolError(self, 'Pool reached maximum size and no more connections are allowed.')
    # pool is empty but non-blocking; fall through and create a fresh connection
return conn or self._new_conn()
'Put a connection back into the pool. :param conn: Connection object for the current host and port as returned by :meth:`._new_conn` or :meth:`._get_conn`. If the pool is already full, the connection is discarded because we exceeded maxsize. If connections are discarded frequently, then maxsize should be increased.'
def _put_conn(self, conn):
try:
    self.pool.put(conn, block=False)
except Full:
    log.warning('HttpConnectionPool is full, discarding connection: %s' % self.host)
'Perform a request on a given httplib connection object taken from our pool.'
def _make_request(self, conn, method, url, timeout=_Default, **httplib_request_kw):
self.num_requests += 1
if timeout is _Default:
    timeout = self.timeout
conn.timeout = timeout
conn.request(method, url, **httplib_request_kw)
sock = getattr(conn, 'sock', False)
if sock:
    sock.settimeout(timeout)
httplib_response = conn.getresponse()
log.debug('"%s %s %s" %s %s' % (method, url, conn._http_vsn_str, httplib_response.status, httplib_response.length))
return httplib_response
'Check if the given ``url`` is a member of the same host as this connection pool.'
def is_same_host(self, url):
(scheme, host, port) = get_host(url)
if self.port and not port:
    port = port_by_scheme.get(scheme)
return url.startswith('/') or ((scheme, host, port) == (self.scheme, self.host, self.port))
'Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you\'ll need to specify all the raw details. .. note:: More commonly, it\'s appropriate to use a convenience method provided by :class:`.RequestMethods`, such as :meth:`request`. .. note:: `release_conn` will only behave as expected if `preload_content=False` because we want to make `preload_content=False` the default behaviour someday soon without breaking backwards compatibility. :param method: HTTP request method (such as GET, POST, PUT, etc.) :param body: Data to send in the request body (useful for creating POST requests, see HTTPConnectionPool.post_url for more convenience). :param headers: Dictionary of custom headers to send, such as User-Agent, If-None-Match, etc. If None, pool headers are used. If provided, these headers completely replace any pool-specific headers. :param retries: Number of retries to allow before raising a MaxRetryError exception. :param redirect: Automatically handle redirects (status codes 301, 302, 303, 307), each redirect counts as a retry. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is consistent else will raise HostChangedError. When False, you can use the pool on an HTTP proxy and request foreign hosts. :param timeout: If specified, overrides the default timeout for this one request. :param pool_timeout: If set and the pool is set to block=True, then this method will block for ``pool_timeout`` seconds and raise EmptyPoolError if no connection is available within the time period. :param release_conn: If False, then the urlopen call will not release the connection back into the pool once a response is received (but will release if you read the entire contents of the response such as when `preload_content=True`). This is useful if you\'re not preloading the response\'s content immediately. You will need to call ``r.release_conn()`` on the response ``r`` to return the connection back into the pool. If None, it takes the value of ``response_kw.get(\'preload_content\', True)``. :param \**response_kw: Additional parameters are passed to :meth:`urllib3.response.HTTPResponse.from_httplib`'
def urlopen(self, method, url, body=None, headers=None, retries=3, redirect=True, assert_same_host=True, timeout=_Default, pool_timeout=None, release_conn=None, **response_kw):
if headers is None:
    headers = self.headers
if retries < 0:
    raise MaxRetryError(self, url)
if timeout is _Default:
    timeout = self.timeout
if release_conn is None:
    release_conn = response_kw.get('preload_content', True)
if assert_same_host and not self.is_same_host(url):
    host = '%s://%s' % (self.scheme, self.host)
    if self.port:
        host = '%s:%d' % (host, self.port)
    raise HostChangedError(self, url, retries - 1)
conn = None
try:
    conn = self._get_conn(timeout=pool_timeout)
    httplib_response = self._make_request(conn, method, url, timeout=timeout, body=body, headers=headers)
    response_conn = (not release_conn) and conn
    response = HTTPResponse.from_httplib(httplib_response, pool=self, connection=response_conn, **response_kw)
except Empty as e:
    raise TimeoutError(self, 'Request timed out. (pool_timeout=%s)' % pool_timeout)
except SocketTimeout as e:
    raise TimeoutError(self, 'Request timed out. (timeout=%s)' % timeout)
except BaseSSLError as e:
    raise SSLError(e)
except CertificateError as e:
    raise SSLError(e)
except (HTTPException, SocketError) as e:
    conn = None
    err = e
finally:
    if conn and release_conn:
        self._put_conn(conn)
if not conn:
    log.warn("Retrying (%d attempts remain) after connection broken by '%r': %s" % (retries, err, url))
    return self.urlopen(method, url, body, headers, retries - 1, redirect, assert_same_host)
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
    log.info('Redirecting %s -> %s' % (url, redirect_location))
    return self.urlopen(method, redirect_location, body, headers, retries - 1, redirect, assert_same_host)
return response
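# A minimal usage sketch of the pool-level urlopen above, assuming
# urllib3's HTTPConnectionPool (consistent with the docstring): retries
# and redirects are consumed from the same budget.
from urllib3.connectionpool import HTTPConnectionPool
pool = HTTPConnectionPool('example.com', port=80)
r = pool.urlopen('GET', '/', retries=3, redirect=True)
print(r.status)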
'Return a fresh :class:`httplib.HTTPSConnection`.'
def _new_conn(self):
self.num_connections += 1
log.info('Starting new HTTPS connection (%d): %s' % (self.num_connections, self.host))
if not ssl:
    if (not HTTPSConnection) or (HTTPSConnection is object):
        raise SSLError("Can't connect to HTTPS URL because the SSL module is not available.")
    return HTTPSConnection(host=self.host, port=self.port)
connection = VerifiedHTTPSConnection(host=self.host, port=self.port)
connection.set_cert(key_file=self.key_file, cert_file=self.cert_file, cert_reqs=self.cert_reqs, ca_certs=self.ca_certs)
return connection
'authurl is a random URL on the server that is protected by NTLM. user is the Windows user, probably in the DOMAIN\username format. pw is the password for the user.'
def __init__(self, user, pw, authurl, *args, **kwargs):
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
'Make a request using :meth:`urlopen` with the appropriate encoding of ``fields`` based on the ``method`` used. This is a convenience method that requires the least amount of manual effort. It can be used in most situations, while still having the option to drop down to more specific methods when necessary, such as :meth:`request_encode_url`, :meth:`request_encode_body`, or even the lowest level :meth:`urlopen`.'
def request(self, method, url, fields=None, headers=None, **urlopen_kw):
method = method.upper()
if method in self._encode_url_methods:
    return self.request_encode_url(method, url, fields=fields, headers=headers, **urlopen_kw)
else:
    return self.request_encode_body(method, url, fields=fields, headers=headers, **urlopen_kw)
'Make a request using :meth:`urlopen` with the ``fields`` encoded in the url. This is useful for request methods like GET, HEAD, DELETE, etc.'
def request_encode_url(self, method, url, fields=None, **urlopen_kw):
if fields:
    url += '?' + urlencode(fields)
return self.urlopen(method, url, **urlopen_kw)
'Make a request using :meth:`urlopen` with the ``fields`` encoded in the body. This is useful for request methods like POST, PUT, PATCH, etc. When ``encode_multipart=True`` (default), then :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the payload with the appropriate content type. Otherwise :meth:`urllib.urlencode` is used with the \'application/x-www-form-urlencoded\' content type. Multipart encoding must be used when posting files, and it\'s reasonably safe to use it in other times too. However, it may break request signing, such as with OAuth. Supports an optional ``fields`` parameter of key/value strings AND key/filetuple. A filetuple is a (filename, data) tuple. For example: :: fields = { \'foo\': \'bar\', \'fakefile\': (\'foofile.txt\', \'contents of foofile\'), \'realfile\': (\'barfile.txt\', open(\'realfile\').read()), \'nonamefile\': (\'contents of nonamefile field\'), } When uploading a file, providing a filename (the first parameter of the tuple) is optional but recommended to best mimic the behavior of browsers. Note that if ``headers`` are supplied, the \'Content-Type\' header will be overwritten because it depends on the dynamic random boundary string which is used to compose the body of the request. The random boundary string can be explicitly set with the ``multipart_boundary`` parameter.'
def request_encode_body(self, method, url, fields=None, headers=None, encode_multipart=True, multipart_boundary=None, **urlopen_kw):
if encode_multipart:
    (body, content_type) = encode_multipart_formdata(fields or {}, boundary=multipart_boundary)
else:
    (body, content_type) = (urlencode(fields or {}), 'application/x-www-form-urlencoded')
headers = headers or {}
headers.update({'Content-Type': content_type})
return self.urlopen(method, url, body=body, headers=headers, **urlopen_kw)
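# A minimal usage sketch of body-encoded requests via the convenience
# request() method described above, assuming urllib3's PoolManager; the
# field values are hypothetical.
from urllib3 import PoolManager
http = PoolManager()
fields = {
    'foo': 'bar',
    'fakefile': ('foofile.txt', 'contents of foofile'),  # (filename, data) filetuple
}
r = http.request('POST', 'http://example.com/upload', fields=fields)
print(r.status)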
'return a list of connected hosts and the number of connections to each. [(\'foo.com:80\', 2), (\'bar.org\', 1)]'
def open_connections(self):
return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
'close connection(s) to <host> host is the host:port spec, as in \'www.cnn.com:8080\' as passed in. no error occurs if there is no connection to that host.'
def close_connection(self, host):
for h in self._cm.get_all(host):
    self._cm.remove(h)
    h.close()
'close all open connections'
def close_all(self):
for (host, conns) in self._cm.get_all().items():
    for h in conns:
        self._cm.remove(h)
        h.close()
'tells us that this request is now closed and the connection is ready for another request'
def _request_closed(self, request, host, connection):
self._cm.set_ready(connection, 1)
'start the transaction with a re-used connection and return a response object (r) upon success or None on failure. This does NOT close or remove bad connections in cases where it returns. However, if an unexpected exception occurs, it will close and remove the connection before re-raising.'
def _reuse_connection(self, h, req, host):
try:
    self._start_transaction(h, req)
    r = h.getresponse()
except (socket.error, httplib.HTTPException):
    r = None
except:
    if DEBUG:
        DEBUG.error('unexpected exception - closing connection to %s (%d)', host, id(h))
    self._cm.remove(h)
    h.close()
    raise
if (r is None) or (r.version == 9):
    if DEBUG:
        DEBUG.info('failed to re-use connection to %s (%d)', host, id(h))
    r = None
elif DEBUG:
    DEBUG.info('re-using connection to %s (%d)', host, id(h))
return r
'Create a JSON object representing the query from a request received from the Dashboard. :param request: :return:'
def create_from_request(self, request):
query_json = {'process_type': DVAPQL.QUERY}
count = request.POST.get('count')
excluded = json.loads(request.POST.get('excluded_index_entries'))
selected_indexers = json.loads(request.POST.get('selected_indexers'))
approximate = request.POST.get('approximate') == 'true'
query_json['image_data_b64'] = request.POST.get('image_url')[22:]
query_json['indexer_queries'] = []
for k in selected_indexers:
    query_json['indexer_queries'].append({
        'algorithm': k,
        'count': count,
        'excluded_index_entries_pk': [int(epk) for epk in excluded] if excluded else [],
        'approximate': approximate,
    })
user = request.user if request.user.is_authenticated else None
self.create_from_json(query_json, user)
return self.query
'# TODO: speed this up by skipping refreshes when count is unchanged. :param index_name: :return:'
def refresh_index(self, index_name):
index_entries = IndexEntries.objects.all()
visual_index = self.visual_retriever[index_name]
for index_entry in index_entries:
    if (index_entry.pk not in visual_index.loaded_entries) and (index_entry.algorithm == index_name) and (index_entry.count > 0):
        fname = '{}/{}/indexes/{}'.format(settings.MEDIA_ROOT, index_entry.video_id, index_entry.features_file_name)
        vectors = indexer.np.load(fname)
        vector_entries = json.load(open('{}/{}/indexes/{}'.format(settings.MEDIA_ROOT, index_entry.video_id, index_entry.entries_file_name)))
        logging.info('Starting {} in {} with shape {}'.format(index_entry.video_id, visual_index.name, vectors.shape))
        start_index = visual_index.findex
        try:
            visual_index.load_index(vectors, vector_entries)
        except:
            logging.info('ERROR Failed to load {} vectors shape {} entries {}'.format(index_entry.video_id, vectors.shape, len(vector_entries)))
        visual_index.loaded_entries[index_entry.pk] = indexer.IndexRange(start=start_index, end=visual_index.findex - 1)
        logging.info('finished {} in {}, current shape {}, range {} to {}'.format(index_entry.video_id, visual_index.name, visual_index.index.shape, visual_index.loaded_entries[index_entry.pk].start, visual_index.loaded_entries[index_entry.pk].end))
'Construct the network.'
def setup(self):
raise NotImplementedError('Must be implemented by the subclass.')
'Load network weights. data_path: The path to the numpy-serialized network weights session: The current TensorFlow session ignore_missing: If true, serialized weights for missing layers are ignored.'
def load(self, data_path, session, ignore_missing=False):
data_dict = np.load(data_path, encoding='latin1').item()
for op_name in data_dict:
    with tf.variable_scope(op_name, reuse=True):
        for (param_name, data) in iteritems(data_dict[op_name]):
            try:
                var = tf.get_variable(param_name)
                session.run(var.assign(data))
            except ValueError:
                if not ignore_missing:
                    raise
'Set the input(s) for the next operation by replacing the terminal nodes. The arguments can be either layer names or the actual layers.'
def feed(self, *args):
assert len(args) != 0
self.terminals = []
for fed_layer in args:
    if isinstance(fed_layer, string_types):
        try:
            fed_layer = self.layers[fed_layer]
        except KeyError:
            raise KeyError('Unknown layer name fed: %s' % fed_layer)
    self.terminals.append(fed_layer)
return self
'Returns the current network output.'
def get_output(self):
return self.terminals[(-1)]
'Returns an index-suffixed unique name for the given prefix. This is used for auto-generating layer names based on the type-prefix.'
def get_unique_name(self, prefix):
ident = sum(t.startswith(prefix) for (t, _) in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
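# A quick worked example of the naming scheme above: with two existing
# 'conv' layers, the next auto-generated name is 'conv_3' (the layer dict
# below is hypothetical).
layers = {'conv_1': None, 'conv_2': None, 'relu_1': None}
ident = sum(t.startswith('conv') for t in layers) + 1
print('%s_%d' % ('conv', ident))  # prints: conv_3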
'Creates a new TensorFlow variable.'
def make_var(self, name, shape):
return tf.get_variable(name, shape, trainable=self.trainable)
'Verifies that the padding is one of the supported ones.'
def validate_padding(self, padding):
assert (padding in ('SAME', 'VALID'))
'Simplify this mess by having a separate create vs. load/init.'
def __init__(self, fnames, n_components, model_proto_filename, m, v, sub, test_mode=False, dc=None):
data = []
self.dc = dc
self.fnames = fnames
self.entries = []
for fname in fnames:
    nmat = np.load(fname)
    if nmat.ndim > 2:
        nmat = nmat.squeeze()
    data.append(nmat)
    for e in json.load(open(fname.replace('npy', 'json'))):
        self.entries.append(e)
if data:
    if len(data) > 1:
        self.data = np.concatenate(data)
    else:
        self.data = data[0]
    logging.info(self.data.shape)
self.test_mode = test_mode
self.n_components = n_components
self.m = m
self.v = v
self.sub = sub
self.model = None
self.searcher = None
self.pca_reduction = None
self.P = None
self.mu = None
self.model_proto_filename = model_proto_filename
self.P_filename = model_proto_filename.replace('.proto', '.P.npy')
self.mu_filename = model_proto_filename.replace('.proto', '.mu.npy')
self.pca_filename = model_proto_filename.replace('.proto', '.pca.pkl')
self.model_lmdb_filename = model_proto_filename.replace('.proto', '_lmdb')
self.permuted_inds_filename = model_proto_filename.replace('.proto', '.permuted_inds.pkl')
self.permuted_inds = None
'A simple PCA implementation that demonstrates how eigenvalue allocation is used to permute dimensions in order to balance the variance across subvectors. There are plenty of PCA implementations elsewhere. What is important is that the eigenvalues can be used to compute a variance-balancing dimension permutation.'
def pca(self):
(count, D) = self.data.shape
mu = self.data.sum(axis=0) / float(count)
summed_covar = reduce(lambda acc, x: acc + np.outer(x, x), self.data, np.zeros((D, D)))
A = (summed_covar / (count - 1)) - np.outer(mu, mu)
(eigenvalues, P) = np.linalg.eigh(A)
self.permuted_inds = eigenvalue_allocation(2, eigenvalues)
P = P[:, self.permuted_inds]
return (P, mu)
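# A self-contained sketch of the variance-balancing idea described in the
# docstring above: deal PCA dimensions (largest eigenvalue first) to the
# bucket with the smallest accumulated log-variance. This illustrative
# helper is an assumption, not the eigenvalue_allocation used by the class.
import numpy as np

def balanced_permutation(eigenvalues, num_buckets=2):
    order = np.argsort(eigenvalues)[::-1]          # dimensions by decreasing variance
    buckets = [[] for _ in range(num_buckets)]
    totals = np.zeros(num_buckets)
    for ind in order:
        b = int(np.argmin(totals))                 # emptiest bucket gets the next dim
        buckets[b].append(ind)
        totals[b] += np.log(max(eigenvalues[ind], 1e-12))
    return np.concatenate([np.asarray(b, dtype=int) for b in buckets])

toy = np.random.randn(500, 8) * np.arange(1, 9)    # toy data with uneven variance
eigenvalues, P = np.linalg.eigh(np.cov(toy, rowvar=False))
print(balanced_permutation(eigenvalues))           # a permutation of 0..7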
'Constructs a SsdInceptionV2FeatureExtractor. Args: depth_multiplier: float depth multiplier for feature extractor Returns: an ssd_inception_v2_feature_extractor.SsdInceptionV2FeatureExtractor.'
def _create_feature_extractor(self, depth_multiplier):
min_depth = 32
conv_hyperparams = {}
return ssd_inception_v2_feature_extractor.SSDInceptionV2FeatureExtractor(depth_multiplier, min_depth, conv_hyperparams)
'MobileNetV1 Feature Extractor for SSD Models. Args: depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops. reuse_weights: Whether to reuse variables. Default is None.'
def __init__(self, depth_multiplier, min_depth, conv_hyperparams, reuse_weights=None):
super(SSDMobileNetV1FeatureExtractor, self).__init__(depth_multiplier, min_depth, conv_hyperparams, reuse_weights)
'SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images.'
def preprocess(self, resized_inputs):
return (((2.0 / 255.0) * resized_inputs) - 1.0)
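# A quick numeric check of the [0, 255] -> [-1, 1] mapping above.
for pixel in (0.0, 127.5, 255.0):
    print(pixel, (2.0 / 255.0) * pixel - 1.0)  # -> -1.0, 0.0, 1.0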
'Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]'
def extract_features(self, preprocessed_inputs):
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
    tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                   tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
    ['image size must at least be 33 in both height and width.'])
feature_map_layout = {
    'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '', ''],
    'layer_depth': [-1, -1, 512, 256, 256, 128],
}
with tf.control_dependencies([shape_assert]):
    with slim.arg_scope(self._conv_hyperparams):
        with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope:
            (_, image_features) = mobilenet_v1.mobilenet_v1_base(
                preprocessed_inputs,
                final_endpoint='Conv2d_13_pointwise',
                min_depth=self._min_depth,
                depth_multiplier=self._depth_multiplier,
                scope=scope)
            feature_maps = feature_map_generators.multi_resolution_feature_maps(
                feature_map_layout=feature_map_layout,
                depth_multiplier=self._depth_multiplier,
                min_depth=self._min_depth,
                insert_1x1_conv=True,
                image_features=image_features)
return feature_maps.values()
'Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor Returns: an ssd_meta_arch.SSDFeatureExtractor object.'
def _create_feature_extractor(self, depth_multiplier):
min_depth = 32
conv_hyperparams = {}
return ssd_mobilenet_v1_feature_extractor.SSDMobileNetV1FeatureExtractor(depth_multiplier, min_depth, conv_hyperparams)
'Constructor. Args: architecture: Architecture name of the Resnet V1 model. resnet_model: Definition of the Resnet V1 model. is_training: See base class. first_stage_features_stride: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16.'
def __init__(self, architecture, resnet_model, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
    raise ValueError('`first_stage_features_stride` must be 8 or 16.')
self._architecture = architecture
self._resnet_model = resnet_model
super(FasterRCNNResnetV1FeatureExtractor, self).__init__(is_training, first_stage_features_stride, reuse_weights, weight_decay)
'Faster R-CNN Resnet V1 preprocessing. VGG style channel mean subtraction as described here: https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images.'
def preprocess(self, resized_inputs):
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
'Extracts first stage RPN features. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation.'
def _extract_proposal_features(self, preprocessed_inputs, scope):
if len(preprocessed_inputs.get_shape().as_list()) != 4:
    raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a tensor of shape %s' % preprocessed_inputs.get_shape())
shape_assert = tf.Assert(
    tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                   tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
    ['image size must at least be 33 in both height and width.'])
with tf.control_dependencies([shape_assert]):
    with slim.arg_scope(resnet_utils.resnet_arg_scope(batch_norm_epsilon=1e-05, batch_norm_scale=True, weight_decay=self._weight_decay)):
        with tf.variable_scope(self._architecture, reuse=self._reuse_weights) as var_scope:
            (_, activations) = self._resnet_model(
                preprocessed_inputs,
                num_classes=None,
                is_training=False,
                global_pool=False,
                output_stride=self._first_stage_features_stride,
                spatial_squeeze=False,
                scope=var_scope)
handle = scope + '/%s/block3' % self._architecture
return activations[handle]
'Extracts second stage box classifier features. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name (unused). Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal.'
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
with tf.variable_scope(self._architecture, reuse=self._reuse_weights):
    with slim.arg_scope(resnet_utils.resnet_arg_scope(batch_norm_epsilon=1e-05, batch_norm_scale=True, weight_decay=self._weight_decay)):
        with slim.arg_scope([slim.batch_norm], is_training=False):
            blocks = [resnet_utils.Block('block4', resnet_v1.bottleneck, [{'depth': 2048, 'depth_bottleneck': 512, 'stride': 1}] * 3)]
            proposal_classifier_features = resnet_utils.stack_blocks_dense(proposal_feature_maps, blocks)
return proposal_classifier_features
'Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16, or if `architecture` is not supported.'
def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
super(FasterRCNNResnet50FeatureExtractor, self).__init__('resnet_v1_50', resnet_v1.resnet_v1_50, is_training, first_stage_features_stride, reuse_weights, weight_decay)
'Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16, or if `architecture` is not supported.'
def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
super(FasterRCNNResnet101FeatureExtractor, self).__init__('resnet_v1_101', resnet_v1.resnet_v1_101, is_training, first_stage_features_stride, reuse_weights, weight_decay)
'Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16, or if `architecture` is not supported.'
def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
super(FasterRCNNResnet152FeatureExtractor, self).__init__('resnet_v1_152', resnet_v1.resnet_v1_152, is_training, first_stage_features_stride, reuse_weights, weight_decay)
'InceptionV2 Feature Extractor for SSD Models. Args: depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops. reuse_weights: Whether to reuse variables. Default is None.'
def __init__(self, depth_multiplier, min_depth, conv_hyperparams, reuse_weights=None):
super(SSDInceptionV2FeatureExtractor, self).__init__(depth_multiplier, min_depth, conv_hyperparams, reuse_weights)
'SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images.'
def preprocess(self, resized_inputs):
return (((2.0 / 255.0) * resized_inputs) - 1.0)
'Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]'
def extract_features(self, preprocessed_inputs):
preprocessed_inputs.get_shape().assert_has_rank(4)
shape_assert = tf.Assert(
    tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                   tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
    ['image size must at least be 33 in both height and width.'])
feature_map_layout = {
    'from_layer': ['Mixed_4c', 'Mixed_5c', '', '', '', ''],
    'layer_depth': [-1, -1, 512, 256, 256, 128],
}
with tf.control_dependencies([shape_assert]):
    with slim.arg_scope(self._conv_hyperparams):
        with tf.variable_scope('InceptionV2', reuse=self._reuse_weights) as scope:
            (_, image_features) = inception_v2.inception_v2_base(
                preprocessed_inputs,
                final_endpoint='Mixed_5c',
                min_depth=self._min_depth,
                depth_multiplier=self._depth_multiplier,
                scope=scope)
            feature_maps = feature_map_generators.multi_resolution_feature_maps(
                feature_map_layout=feature_map_layout,
                depth_multiplier=self._depth_multiplier,
                min_depth=self._min_depth,
                insert_1x1_conv=True,
                image_features=image_features)
return feature_maps.values()
'Checks the extracted features are of correct shape. Args: feature_extractor: The feature extractor to test. preprocessed_inputs: A [batch, height, width, 3] tensor to extract features with. expected_feature_map_shapes: The expected shape of the extracted features.'
def _validate_features_shape(self, feature_extractor, preprocessed_inputs, expected_feature_map_shapes):
feature_maps = feature_extractor.extract_features(preprocessed_inputs)
feature_map_shapes = [tf.shape(feature_map) for feature_map in feature_maps]
init_op = tf.global_variables_initializer()
with self.test_session() as sess:
    sess.run(init_op)
    feature_map_shapes_out = sess.run(feature_map_shapes)
    for (shape_out, exp_shape_out) in zip(feature_map_shapes_out, expected_feature_map_shapes):
        self.assertAllEqual(shape_out, exp_shape_out)
'Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor Returns: an ssd_meta_arch.SSDFeatureExtractor object.'
@abstractmethod
def _create_feature_extractor(self, depth_multiplier):
pass
'Constructor. Args: is_training: See base class. first_stage_features_stride: See base class. reuse_weights: See base class. weight_decay: See base class. Raises: ValueError: If `first_stage_features_stride` is not 8 or 16.'
def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
    raise ValueError('`first_stage_features_stride` must be 8 or 16.')
super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__(is_training, first_stage_features_stride, reuse_weights, weight_decay)
'Faster R-CNN with Inception Resnet v2 preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: A [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: A [batch, height_out, width_out, channels] float32 tensor representing a batch of images.'
def preprocess(self, resized_inputs):
return (((2.0 / 255.0) * resized_inputs) - 1.0)
'Extracts first stage RPN features. Extracts features using the first half of the Inception Resnet v2 network. We construct the network in `align_feature_maps=True` mode, which means that all VALID paddings in the network are changed to SAME padding so that the feature maps are aligned. Args: preprocessed_inputs: A [batch, height, width, channels] float32 tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth] Raises: InvalidArgumentError: If the spatial size of `preprocessed_inputs` (height or width) is less than 33. ValueError: If the created network is missing the required activation.'
def _extract_proposal_features(self, preprocessed_inputs, scope):
if len(preprocessed_inputs.get_shape().as_list()) != 4:
    raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a tensor of shape %s' % preprocessed_inputs.get_shape())
with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(weight_decay=self._weight_decay)):
    with slim.arg_scope([slim.batch_norm], is_training=False):
        with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights) as scope:
            (rpn_feature_map, _) = inception_resnet_v2.inception_resnet_v2_base(
                preprocessed_inputs,
                final_endpoint='PreAuxLogits',
                scope=scope,
                output_stride=self._first_stage_features_stride,
                align_feature_maps=True)
return rpn_feature_map