function: string (lengths 79 to 138k)
label: string (20 classes)
info: string (lengths 42 to 261)
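Each record below pairs a Python function in which one exception class has been masked out as __HOLE__ (the function field), the masked class name (the label field), and the provenance path of the snippet (the info field). A minimal sketch of how such records could be loaded and tallied, assuming the dump is stored as JSON lines with exactly these three fields — the filename py150_holes.jsonl and the JSONL layout are assumptions, not part of the dataset:

import json
from collections import Counter

def iter_records(path="py150_holes.jsonl"):
    # Assumed layout: one JSON object per line with keys
    # "function", "label", and "info"; adjust to the real dump format.
    with open(path, "r", encoding="utf-8") as fp:
        for line in fp:
            rec = json.loads(line)
            yield rec["function"], rec["label"], rec["info"]

# Example: how often does each exception class fill __HOLE__?
label_counts = Counter(label for _, label, _ in iter_records())
print(label_counts.most_common(5))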
def Get(self, limit, offset=0):
    """Get results of the query with a limit on the number of results.

    Args:
      limit: maximum number of values to return.
      offset: offset requested -- if nonzero, this will override the offset in
        the original query

    Returns:
      A list of entities with at most "limit" entries (less if the query
      completes before reading limit values).
    """
    count = 1
    result = []

    iterator = self.Run()

    try:
        for i in xrange(offset):
            val = iterator.next()
    except __HOLE__:
        pass

    try:
        while count <= limit:
            val = iterator.next()
            result.append(val)
            count += 1
    except StopIteration:
        pass
    return result
StopIteration
dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/google/appengine/api/datastore.py/MultiQuery.Get
def __init__(self, entity_iterator, orderings):
    """Ctor.

    Args:
      entity_iterator: an iterator of entities which will be wrapped.
      orderings: an iterable of (identifier, order) pairs. order should be
        either Query.ASCENDING or Query.DESCENDING.
    """
    self.__entity_iterator = entity_iterator
    self.__entity = None
    self.__min_max_value_cache = {}
    try:
        self.__entity = entity_iterator.next()
    except __HOLE__:
        pass
    else:
        self.__orderings = orderings
StopIteration
dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/google/appengine/api/datastore.py/MultiQuery.SortOrderEntity.__init__
def __delitem__(self, query_filter):
    """Delete a filter by deleting it from all subqueries.

    If a KeyError is raised during the attempt, it is ignored, unless
    every subquery raised a KeyError. If any other exception is raised, any
    deletes will be rolled back.

    Args:
      query_filter: the filter to delete.

    Raises:
      KeyError: No subquery had an entry containing query_filter.
    """
    subquery_count = len(self.__bound_queries)
    keyerror_count = 0
    saved_items = []
    for index, query in enumerate(self.__bound_queries):
        try:
            saved_items.append(query.get(query_filter, None))
            del query[query_filter]
        except __HOLE__:
            keyerror_count += 1
        except:
            for q, old_value in itertools.izip(self.__bound_queries[:index],
                                               saved_items):
                if old_value is not None:
                    q[query_filter] = old_value
            raise
    if keyerror_count == subquery_count:
        raise KeyError(query_filter)
KeyError
dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/google/appengine/api/datastore.py/MultiQuery.__delitem__
def next(self):
    if not self.__buffer:
        self.__buffer = self._Next(self.__batch_size)
    try:
        return self.__buffer.pop(0)
    except __HOLE__:
        raise StopIteration
IndexError
dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/google/appengine/api/datastore.py/Iterator.next
def has_migration_started(self,):
    from region_migration.models import DatabaseRegionMigrationDetail
    try:
        migration = self.migration.get()
    except __HOLE__:
        return False

    if migration.is_migration_finished():
        return False

    if migration.current_step > 0:
        return True

    status_to_check = [DatabaseRegionMigrationDetail.WAITING,
                       DatabaseRegionMigrationDetail.RUNNING]
    details = migration.details.filter(status__in=status_to_check)
    if details:
        return True

    return False
ObjectDoesNotExist
dataset/ETHPy150Open globocom/database-as-a-service/dbaas/logical/models.py/Database.has_migration_started
def get_field_type(values):
    """ Determines the type of every item in the value list or dictionary,
        then consolidates that into a single output type. This is used to
        determine what type to convert an entire field into - often within
        a database.

        Input:
           - a list or dictionary of strings
        Output:
           - a single value indicating what type the data can be safely
             converted into.
        Types identified (and returned) include:
           - unknown
           - string
           - integer
           - float
           - timestamp
        Test Coverage:
           - complete via test harness
    """
    type_freq = collections.defaultdict(int)

    # count occurrences of each type:
    for key in values:
        i = _get_type(key)
        if i != 'unknown':      # NOTE: unknown is filtered out
            try:                # values is a dict
                type_freq[i] += values[key]
            except __HOLE__:    # values is a list
                type_freq[i] += 1

    type_list = type_freq.keys()

    # try simple rules:
    result = _get_field_type_rule(type_list)
    if result:
        return result

    # try probabilities:
    result = _get_field_type_probability(type_freq)
    return (result or 'unknown')
TypeError
dataset/ETHPy150Open kenfar/DataGristle/gristle/field_type.py/get_field_type
def is_string(value):
    """ Returns True if the value is a string, subject to false-negatives
        if the string is all numeric.
        'b'       is True
        ''        is True
        ' '       is True
        '$3'      is True
        '4,333'   is True
        '33.22'   is False
        '3'       is False
        '-3'      is False
        3         is False
        3.3       is False
        None      is False
        Test coverage:
           - complete, via test harness
    """
    try:                    # catch integers & floats
        float(value)
        return False
    except TypeError:       # catch None
        return False
    except __HOLE__:        # catch characters
        return True
ValueError
dataset/ETHPy150Open kenfar/DataGristle/gristle/field_type.py/is_string
def is_integer(value):
    """ Returns True if the input consists solely of digits and represents
        an integer rather than character data or a float.
        '3'       is True
        '-3'      is True
        3         is True
        -3        is True
        3.3       is False
        '33.22'   is False
        '4,333'   is False
        '$3'      is False
        ''        is False
        'b'       is False
        None      is False
        Test coverage:
           - complete, via test harness
    """
    try:
        i = float(value)
        fract, dummy = math.modf(i)
        if fract > 0:
            return False
        else:
            return True
    except ValueError:
        return False
    except __HOLE__:
        return False
TypeError
dataset/ETHPy150Open kenfar/DataGristle/gristle/field_type.py/is_integer
def is_float(value):
    """ Returns True if the input consists solely of digits and represents
        a float rather than character data or an integer.
        44.55     is True
        '33.22'   is True
        6         is False
        '3'       is False
        '-3'      is False
        '4,333'   is False
        '$3'      is False
        ''        is False
        'b'       is False
        None      is False
        Test coverage:
           - complete, via test harness
    """
    try:
        i = float(value)
        fract, dummy = math.modf(i)
        if fract == 0:
            return False
        else:
            return True
    except __HOLE__:
        return False
    except TypeError:
        return False
ValueError
dataset/ETHPy150Open kenfar/DataGristle/gristle/field_type.py/is_float
def is_unknown(value):
    """ Returns True if the value is a common unknown indicator:
        ''          is True
        ' '         is True
        'na'        is True
        'NA'        is True
        'n/a'       is True
        'N/A'       is True
        'unk'       is True
        'unknown'   is True
        '3'         is False
        '-3'        is False
        '33.22'     is False
        '4,333'     is False
        '$3'        is False
        'b'         is False
        3           is False
        3.3         is False
        None        is False
        Test coverage:
           - complete, via test harness
    """
    unk_vals = ['na', 'n/a', 'unk', 'unknown', '']
    try:
        if value.strip().lower() in unk_vals:
            return True
        else:
            return False
    except AttributeError:
        return False
    except __HOLE__:
        return False
TypeError
dataset/ETHPy150Open kenfar/DataGristle/gristle/field_type.py/is_unknown
def is_timestamp(time_str):
    """ Determine if arg is a timestamp and if so what format

        Args:
           time_str - character string that may be a date, time, epoch or combo
        Returns:
           status   - True if date/time False if not
           scope    - kind of timestamp
           pattern  - date mask
        To do:
           - consider overrides to default date min & max epoch limits
           - consider consolidating epoch checks with rest of checks
    """
    non_date = (False, None, None)

    if len(time_str) > DATE_MAX_LEN:
        return non_date

    try:
        float_str = float(time_str)
        if DATE_MIN_EPOCH_DEFAULT < float_str < DATE_MAX_EPOCH_DEFAULT:
            t_date = datetime.datetime.fromtimestamp(float(time_str))
            return True, 'second', 'epoch'
    except ValueError:
        pass

    for scope, pattern, date_format in DATE_FORMATS:
        if scope == "microsecond":
            # Special handling for microsecond part. AFAIK there isn't a
            # strftime code for this.
            if time_str.count('.') != 1:
                continue
            time_str, microseconds_str = time_str.split('.')
            try:
                microsecond = int((microseconds_str + '000000')[:6])
            except ValueError:
                continue
        try:
            t_date = datetime.datetime.strptime(time_str, date_format)
        except __HOLE__:
            pass
        else:
            if scope == "microsecond":
                t_date = t_date.replace(microsecond=microsecond)
            return True, scope, pattern
    else:
        return False, None, None
ValueError
dataset/ETHPy150Open kenfar/DataGristle/gristle/field_type.py/is_timestamp
def setdefault(self, key, value):
    """We may not always be connected to an app, but we still need
    to provide a way to the base environment to set its defaults.
    """
    try:
        super(FlaskConfigStorage, self).setdefault(key, value)
    except __HOLE__:
        self._defaults.__setitem__(key, value)
RuntimeError
dataset/ETHPy150Open miracle2k/flask-assets/src/flask_assets.py/FlaskConfigStorage.setdefault
def split_prefix(self, ctx, item):
    """See if ``item`` has blueprint prefix, return (directory, rel_path).
    """
    app = ctx._app
    try:
        if hasattr(app, 'blueprints'):
            blueprint, name = item.split('/', 1)
            directory = get_static_folder(app.blueprints[blueprint])
            endpoint = '%s.static' % blueprint
            item = name
        else:
            # Module support for Flask < 0.7
            module, name = item.split('/', 1)
            directory = get_static_folder(app.modules[module])
            endpoint = '%s.static' % module
            item = name
    except (__HOLE__, KeyError):
        directory = get_static_folder(app)
        endpoint = 'static'

    return directory, item, endpoint
ValueError
dataset/ETHPy150Open miracle2k/flask-assets/src/flask_assets.py/FlaskResolver.split_prefix
def search_for_source(self, ctx, item):
    # If a load_path is set, use it instead of the Flask static system.
    #
    # Note: With only env.directory set, we don't go to default;
    # setting env.directory only makes the output directory fixed.
    if self.use_webassets_system_for_sources(ctx):
        return Resolver.search_for_source(self, ctx, item)

    # Look in correct blueprint's directory
    directory, item, endpoint = self.split_prefix(ctx, item)
    try:
        return self.consider_single_directory(directory, item)
    except __HOLE__:
        # XXX: Hack to make the tests pass, which are written to not
        # expect an IOError upon missing files. They need to be rewritten.
        return path.normpath(path.join(directory, item))
IOError
dataset/ETHPy150Open miracle2k/flask-assets/src/flask_assets.py/FlaskResolver.search_for_source
def convert_item_to_flask_url(self, ctx, item, filepath=None):
    """Given a relative reference like `foo/bar.css`, returns
    the Flask static url. By doing so it takes into account
    blueprints, i.e. in the aforementioned example, ``foo`` may
    reference a blueprint.

    If an absolute path is given via ``filepath``, it will be
    used instead. This is needed because ``item`` may be a glob
    instruction that was resolved to multiple files.

    If app.config("FLASK_ASSETS_USE_S3") exists and is True
    then we import the url_for function from flask.ext.s3,
    otherwise we import url_for from flask directly.

    If app.config("FLASK_ASSETS_USE_CDN") exists and is True
    then we import the url_for function from flask.
    """
    if ctx.environment._app.config.get("FLASK_ASSETS_USE_S3"):
        try:
            from flask.ext.s3 import url_for
        except __HOLE__ as e:
            print("You must have Flask S3 to use FLASK_ASSETS_USE_S3 option")
            raise e
    elif ctx.environment._app.config.get("FLASK_ASSETS_USE_CDN"):
        try:
            from flask.ext.cdn import url_for
        except ImportError as e:
            print("You must have Flask CDN to use FLASK_ASSETS_USE_CDN option")
            raise e
    else:
        from flask import url_for

    directory, rel_path, endpoint = self.split_prefix(ctx, item)

    if filepath is not None:
        filename = filepath[len(directory)+1:]
    else:
        filename = rel_path

    flask_ctx = None
    if not _request_ctx_stack.top:
        flask_ctx = ctx.environment._app.test_request_context()
        flask_ctx.push()
    try:
        url = url_for(endpoint, filename=filename)
        # In some cases, url will be an absolute url with a scheme and
        # hostname (for example, when using werkzeug's host matching).
        # In general, url_for() will return a http url. During assets build,
        # we don't know yet if the assets will be served over http, https or
        # both. Let's use // instead. url_for takes a _scheme argument, but
        # only together with external=True, which we do not want to force
        # every time. Further, this _scheme argument is not able to render
        # // - it always forces a colon.
        if url and url.startswith('http:'):
            url = url[5:]
        return url
    finally:
        if flask_ctx:
            flask_ctx.pop()
ImportError
dataset/ETHPy150Open miracle2k/flask-assets/src/flask_assets.py/FlaskResolver.convert_item_to_flask_url
@property
def _app(self):
    """The application object to work with; this is either the app
    that we have been bound to, or the current application.
    """
    if self.app is not None:
        return self.app

    ctx = _request_ctx_stack.top
    if ctx is not None:
        return ctx.app

    try:
        from flask import _app_ctx_stack
        app_ctx = _app_ctx_stack.top
        if app_ctx is not None:
            return app_ctx.app
    except __HOLE__:
        pass

    raise RuntimeError('assets instance not bound to an application, '
                       'and no application in current context')


# XXX: This is required because in a couple of places, webassets 0.6
# still accesses env.directory, at one point even directly. We need to
# fix this for 0.6 compatibility, but it might be preferable to
# introduce another API similar to _normalize_source_path() for things
# like the cache directory and output files.
ImportError
dataset/ETHPy150Open miracle2k/flask-assets/src/flask_assets.py/Environment._app
def get_file(file_name):
    try:
        f = open(file_name, "rb")
        f_content = f.read()
        f.close()
    except __HOLE__, e:
        sys.stderr.write("[-] Error reading file %s.\n" % e)
        sys.exit(ERR)
    sys.stdout.write("[+] File is ready and is in memory.\n")
    return base64.b64encode(f_content), zlib.crc32(f_content)
IOError
dataset/ETHPy150Open ytisf/PyExfil/pyexfil/pop_exfil_client.py/get_file
def author_addon_clicked(f):
    """Decorator redirecting clicks on "Other add-ons by author"."""
    @functools.wraps(f)
    def decorated(request, *args, **kwargs):
        redirect_id = request.GET.get('addons-author-addons-select', None)
        if not redirect_id:
            return f(request, *args, **kwargs)
        try:
            target_id = int(redirect_id)
            return http.HttpResponsePermanentRedirect(reverse(
                'addons.detail', args=[target_id]))
        except __HOLE__:
            return http.HttpResponseBadRequest('Invalid add-on ID.')
    return decorated
ValueError
dataset/ETHPy150Open mozilla/addons-server/src/olympia/addons/views.py/author_addon_clicked
@addon_valid_disabled_pending_view
@non_atomic_requests
def addon_detail(request, addon):
    """Add-ons details page dispatcher."""
    if addon.is_deleted or (addon.is_pending() and not addon.is_persona()):
        # Allow pending themes to be listed.
        raise http.Http404

    if addon.is_disabled:
        return render(request, 'addons/impala/disabled.html',
                      {'addon': addon}, status=404)

    # addon needs to have a version and be valid for this app.
    if addon.type in request.APP.types:
        if addon.type == amo.ADDON_PERSONA:
            return persona_detail(request, addon)
        else:
            if not addon.current_version:
                raise http.Http404
            return extension_detail(request, addon)
    else:
        # Redirect to an app that supports this type.
        try:
            new_app = [a for a in amo.APP_USAGE if addon.type in a.types][0]
        except __HOLE__:
            raise http.Http404
        else:
            prefixer = urlresolvers.get_url_prefix()
            prefixer.app = new_app.short
            return http.HttpResponsePermanentRedirect(reverse(
                'addons.detail', args=[addon.slug]))
IndexError
dataset/ETHPy150Open mozilla/addons-server/src/olympia/addons/views.py/addon_detail
@mobile_template('addons/{mobile/}persona_detail.html')
@non_atomic_requests
def persona_detail(request, addon, template=None):
    """Details page for Personas."""
    if not (addon.is_public() or addon.is_pending()):
        raise http.Http404

    persona = addon.persona

    # This persona's categories.
    categories = addon.categories.all()
    category_personas = None
    if categories.exists():
        qs = Addon.objects.public().filter(categories=categories[0])
        category_personas = _category_personas(qs, limit=6)

    data = {
        'addon': addon,
        'persona': persona,
        'categories': categories,
        'author_personas': persona.authors_other_addons()[:3],
        'category_personas': category_personas,
    }

    try:
        author = addon.authors.all()[0]
    except __HOLE__:
        author = None
    else:
        author = author.get_url_path(src='addon-detail')
    data['author_gallery'] = author

    if not request.MOBILE:
        # tags
        dev_tags, user_tags = addon.tags_partitioned_by_developer
        data.update({
            'dev_tags': dev_tags,
            'user_tags': user_tags,
            'review_form': ReviewForm(),
            'reviews': Review.objects.valid().filter(addon=addon,
                                                     is_latest=True),
            'get_replies': Review.get_replies,
            'search_cat': 'themes',
            'abuse_form': AbuseForm(request=request),
        })

    return render(request, template, data)
IndexError
dataset/ETHPy150Open mozilla/addons-server/src/olympia/addons/views.py/persona_detail
def _parse_file_contents(self, selector_file_contents):
    try:
        selector_input_json = json.loads(selector_file_contents)
    except __HOLE__:
        raise SelectorParseError('MalformedJSON')
    self._validate_selector_input(selector_input_json)
    selectors = self._parse_input_for_selectors(selector_input_json)
    return selectors
ValueError
dataset/ETHPy150Open m-lab/telescope/telescope/selector.py/SelectorFileParser._parse_file_contents
def _parse_start_time(self, start_time_string):
    """Parse the time window start time.

    Parse the start time from the expected timestamp format to Python
    datetime format. Must be in UTC time.

    Args:
        start_time_string: (str) Timestamp in format YYYY-MM-DDTHH-mm-SS.

    Returns:
        datetime: Python datetime for set timestamp string.
    """
    try:
        timestamp = (
            datetime.datetime.strptime(start_time_string,
                                       '%Y-%m-%dT%H:%M:%SZ'))
        return utils.make_datetime_utc_aware(timestamp)
    except __HOLE__:
        raise SelectorParseError('UnsupportedSubsetDateFormat')
ValueError
dataset/ETHPy150Open m-lab/telescope/telescope/selector.py/SelectorFileParser._parse_start_time
def _parse_ip_translation(self, ip_translation_dict):
    """Parse the ip_translation field into an IPTranslationStrategySpec object.

    Args:
        ip_translation_dict: (dict) An unprocessed dictionary of
            ip_translation data from the input selector file.

    Returns:
        IPTranslationStrategySpec: An IPTranslationStrategySpec, which
            specifies properties of the IP translation strategy according
            to the selector file.
    """
    try:
        ip_translation_spec = iptranslation.IPTranslationStrategySpec
        ip_translation_spec.strategy_name = ip_translation_dict['strategy']
        ip_translation_spec.params = ip_translation_dict['params']
        return ip_translation_spec
    except __HOLE__ as e:
        raise SelectorParseError(('Missing expected field in ip_translation '
                                  'dict: %s') % e.args[0])
KeyError
dataset/ETHPy150Open m-lab/telescope/telescope/selector.py/SelectorFileParser._parse_ip_translation
@property
def last_results(self):
    """The last result that was produced."""
    try:
        return self.results[-1][0]
    except __HOLE__:
        exc.raise_with_cause(exc.NotFound, "Last results not found")
IndexError
dataset/ETHPy150Open openstack/taskflow/taskflow/persistence/models.py/RetryDetail.last_results
@property
def last_failures(self):
    """The last failure dictionary that was produced.

    NOTE(harlowja): This is **not** the same as the local ``failure``
    attribute as the obtained failure dictionary in the ``results`` attribute
    (which is what this returns) is from associated atom failures (which is
    different from the directly related failure of the retry unit associated
    with this atom detail).
    """
    try:
        return self.results[-1][1]
    except __HOLE__:
        exc.raise_with_cause(exc.NotFound, "Last failures not found")
IndexError
dataset/ETHPy150Open openstack/taskflow/taskflow/persistence/models.py/RetryDetail.last_failures
def atom_detail_class(atom_type):
    try:
        return _NAME_TO_DETAIL[atom_type]
    except __HOLE__:
        raise TypeError("Unknown atom type '%s'" % (atom_type))
KeyError
dataset/ETHPy150Open openstack/taskflow/taskflow/persistence/models.py/atom_detail_class
def atom_detail_type(atom_detail):
    try:
        return _DETAIL_TO_NAME[type(atom_detail)]
    except __HOLE__:
        raise TypeError("Unknown atom '%s' (%s)" % (atom_detail,
                                                    type(atom_detail)))
KeyError
dataset/ETHPy150Open openstack/taskflow/taskflow/persistence/models.py/atom_detail_type
def handle_noargs(self, develop, **options):
    call_command('sync_and_migrate')
    try:
        from molly.wurfl import wurfl_data
    except __HOLE__:
        no_wurfl = True
    else:
        no_wurfl = False
    if no_wurfl or not develop:
        call_command('update_wurfl')
    call_command('generate_markers', lazy=True)
    call_command('collectstatic', interactive=False,
                 link=(develop and os.name != 'nt'))
    # Forcing compression because it seems to not compress *sometimes*
    # even if files have been changed...
    call_command('synccompress')
    call_command('synccompress', force=True)
    if develop:
        call_command('runserver')
ImportError
dataset/ETHPy150Open mollyproject/mollyproject/molly/utils/management/commands/deploy.py/Command.handle_noargs
def _main():
    import re
    import sys
    args = sys.argv[1:]
    inFileName = args and args[0] or "Include/token.h"
    outFileName = "Lib/token.py"
    if len(args) > 1:
        outFileName = args[1]
    try:
        fp = open(inFileName)
    except IOError as err:
        sys.stdout.write("I/O error: %s\n" % str(err))
        sys.exit(1)
    lines = fp.read().split("\n")
    fp.close()
    prog = re.compile(
        "#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
        re.IGNORECASE)
    tokens = {}
    for line in lines:
        match = prog.match(line)
        if match:
            name, val = match.group(1, 2)
            val = int(val)
            tokens[val] = name
    # reverse so we can sort them...
    keys = sorted(tokens.keys())
    # load the output skeleton from the target:
    try:
        fp = open(outFileName)
    except __HOLE__ as err:
        sys.stderr.write("I/O error: %s\n" % str(err))
        sys.exit(2)
    format = fp.read().split("\n")
    fp.close()
    try:
        start = format.index("#--start constants--") + 1
        end = format.index("#--end constants--")
    except ValueError:
        sys.stderr.write("target does not contain format markers")
        sys.exit(3)
    lines = []
    for val in keys:
        lines.append("%s = %d" % (tokens[val], val))
    format[start:end] = lines
    try:
        fp = open(outFileName, 'w')
    except IOError as err:
        sys.stderr.write("I/O error: %s\n" % str(err))
        sys.exit(4)
    fp.write("\n".join(format))
    fp.close()
IOError
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/token.py/_main
def is_valid_url(uri):
    try:
        urlvalidate(uri)
    except __HOLE__:
        return False
    return True
ValidationError
dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/main/models/meta_data.py/is_valid_url
def _set_hash(self):
    if not self.data_file:
        return None

    file_exists = self.data_file.storage.exists(self.data_file.name)

    if (file_exists and self.data_file.name != '') \
            or (not file_exists and self.data_file):
        try:
            self.data_file.seek(os.SEEK_SET)
        except __HOLE__:
            return u''
        else:
            self.file_hash = u'md5:%s' \
                % md5(self.data_file.read()).hexdigest()
            return self.file_hash
    return u''
IOError
dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/main/models/meta_data.py/MetaData._set_hash
def setUp(self):
    super(GRRBaseTest, self).setUp()

    tmpdir = os.environ.get("TEST_TMPDIR") or config_lib.CONFIG["Test.tmpdir"]

    # Make a temporary directory for test files.
    self.temp_dir = tempfile.mkdtemp(dir=tmpdir)

    config_lib.CONFIG.SetWriteBack(
        os.path.join(self.temp_dir, "writeback.yaml"))

    if self.install_mock_acl:
        # Enforce checking that security tokens are propagated to the data
        # store but no actual ACLs.
        data_store.DB.security_manager = MockSecurityManager()

    logging.info("Starting test: %s.%s",
                 self.__class__.__name__, self._testMethodName)
    self.last_start_time = time.time()

    try:
        # Clear() is much faster than init but only supported for
        # FakeDataStore.
        data_store.DB.Clear()
    except __HOLE__:
        self.InitDatastore()

    aff4.FACTORY.Flush()

    # Create a Foreman and Filestores, they are used in many tests.
    aff4_grr.GRRAFF4Init().Run()
    filestore.FileStoreInit().Run()
    hunts_results.ResultQueueInitHook().Run()
    api_auth_manager.APIACLInit.InitApiAuthManager()

    # Stub out the email function
    self.emails_sent = []

    def SendEmailStub(to_user, from_user, subject, message, **unused_kwargs):
        self.emails_sent.append((to_user, from_user, subject, message))

    self.mail_stubber = utils.MultiStubber(
        (email_alerts.EMAIL_ALERTER, "SendEmail", SendEmailStub),
        (email.utils, "make_msgid", lambda: "<message id stub>"))
    self.mail_stubber.Start()

    self.nanny_stubber = utils.Stubber(
        client_utils_linux.NannyController, "StartNanny",
        lambda unresponsive_kill_period=None, nanny_logfile=None: True)
    self.nanny_stubber.Start()
AttributeError
dataset/ETHPy150Open google/grr/grr/lib/test_lib.py/GRRBaseTest.setUp
def run(self, result=None):  # pylint: disable=g-bad-name
    """Run the test case.

    This code is basically the same as the standard library, except that when
    there is an exception, the --debug flag allows us to drop into the raising
    function for interactive inspection of the test failure.

    Args:
      result: The testResult object that we will use.
    """
    if result is None:
        result = self.defaultTestResult()
    result.startTest(self)
    testMethod = getattr(  # pylint: disable=g-bad-name
        self, self._testMethodName)
    try:
        try:
            self.setUp()
        except unittest.SkipTest:
            result.addSkip(self, sys.exc_info())
            result.stopTest(self)
            return
        except:
            # Break into interactive debugger on test failure.
            if flags.FLAGS.debug:
                pdb.post_mortem()

            result.addError(self, sys.exc_info())
            # If the setup step failed we stop the entire test suite
            # immediately. This helps catch errors in the setUp() function.
            raise

        ok = False
        try:
            testMethod()
            ok = True
        except self.failureException:
            # Break into interactive debugger on test failure.
            if flags.FLAGS.debug:
                pdb.post_mortem()

            result.addFailure(self, sys.exc_info())
        except KeyboardInterrupt:
            raise
        except unittest.SkipTest:
            result.addSkip(self, sys.exc_info())
        except Exception:  # pylint: disable=broad-except
            # Break into interactive debugger on test failure.
            if flags.FLAGS.debug:
                pdb.post_mortem()

            result.addError(self, sys.exc_info())

        try:
            self.tearDown()
        except __HOLE__:
            raise
        except Exception:  # pylint: disable=broad-except
            # Break into interactive debugger on test failure.
            if flags.FLAGS.debug:
                pdb.post_mortem()

            result.addError(self, sys.exc_info())
            ok = False

        if ok:
            result.addSuccess(self)
    finally:
        result.stopTest(self)
KeyboardInterrupt
dataset/ETHPy150Open google/grr/grr/lib/test_lib.py/GRRBaseTest.run
def RunForTimeWithNoExceptions(self, cmd, argv, timeout=10, should_exit=False,
                               check_exit_code=False):
    """Run a command line argument and check for python exceptions raised.

    Args:
      cmd: The command to run as a string.
      argv: The args.
      timeout: How long to let the command run before terminating.
      should_exit: If True we will raise if the command hasn't exited after
        the specified timeout.
      check_exit_code: If True and should_exit is True, we'll check that the
        exit code was 0 and raise if it isn't.

    Raises:
      RuntimeError: On any errors.
    """
    def HandleTimeout(unused_signum, unused_frame):
        raise TimeoutError()

    exited = False
    proc = None
    try:
        logging.info("Running : %s", [cmd] + argv)
        proc = subprocess.Popen([cmd] + argv, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, bufsize=1)
        signal.signal(signal.SIGALRM, HandleTimeout)
        signal.alarm(timeout)

        stdout = StringIO.StringIO()

        while True:
            proc.poll()
            # Iterate through the output so that we get the output data even
            # if we kill the process.
            for line in proc.stdout.readline():
                stdout.write(line)

            if proc.returncode is not None:
                exited = True
                break

    except TimeoutError:
        pass  # We expect timeouts.

    finally:
        signal.alarm(0)
        try:
            if proc:
                proc.kill()
        except __HOLE__:
            pass  # Could already be dead.

    proc.stdout.flush()
    stdout.write(proc.stdout.read())  # Collect any remaining output.

    if "Traceback (" in stdout.getvalue():
        raise RuntimeError("Exception found in stderr of binary Stderr:\n###\n%s"
                           "###\nCmd: %s" % (stdout.getvalue(), cmd))

    if should_exit and not exited:
        raise RuntimeError("Bin: %s got timeout when executing, expected "
                           "exit. \n%s\n" % (stdout.getvalue(), cmd))

    if not should_exit and exited:
        raise RuntimeError("Bin: %s exited, but should have stayed running.\n%s\n"
                           % (stdout.getvalue(), cmd))

    if should_exit and check_exit_code:
        if proc.returncode != 0:
            raise RuntimeError("Bin: %s should have returned exit code 0 but got "
                               "%s" % (cmd, proc.returncode))
OSError
dataset/ETHPy150Open google/grr/grr/lib/test_lib.py/GRRBaseTest.RunForTimeWithNoExceptions
def _GetActionInstance(self, action_name, arg=None, grr_worker=None,
                       action_worker_cls=None):
    """Run an action and generate responses.

    This basically emulates GRRClientWorker.HandleMessage().

    Args:
      action_name: The action to run.
      arg: A protobuf to pass the action.
      grr_worker: The GRRClientWorker instance to use. If not provided we
        make a new one.
      action_worker_cls: The action worker class to use for iterated actions.
        If not provided we use the default.

    Returns:
      A list of response protobufs.
    """
    # A mock SendReply() method to collect replies.
    def MockSendReply(mock_self, reply=None, **kwargs):
        if reply is None:
            reply = mock_self.out_rdfvalues[0](**kwargs)
        self.results.append(reply)

    if grr_worker is None:
        grr_worker = worker_mocks.FakeClientWorker()

    try:
        suspended_action_id = arg.iterator.suspended_action
        action = grr_worker.suspended_actions[suspended_action_id]
    except (AttributeError, __HOLE__):
        action_cls = actions.ActionPlugin.classes[action_name]

        if issubclass(action_cls, actions.SuspendableAction):
            action = action_cls(grr_worker=grr_worker,
                                action_worker_cls=action_worker_cls)
        else:
            action = action_cls(grr_worker=grr_worker)

    action.SendReply = types.MethodType(MockSendReply, action)

    return action
KeyError
dataset/ETHPy150Open google/grr/grr/lib/test_lib.py/EmptyActionTest._GetActionInstance
def Start(self):
    for k, v in self._overrides.iteritems():
        self._saved_values[k] = config_lib.CONFIG.Get(k)
        try:
            config_lib.CONFIG.Set.old_target(k, v)
        except __HOLE__:
            config_lib.CONFIG.Set(k, v)
AttributeError
dataset/ETHPy150Open google/grr/grr/lib/test_lib.py/ConfigOverrider.Start
def Stop(self):
    for k, v in self._saved_values.iteritems():
        try:
            config_lib.CONFIG.Set.old_target(k, v)
        except __HOLE__:
            config_lib.CONFIG.Set(k, v)
AttributeError
dataset/ETHPy150Open google/grr/grr/lib/test_lib.py/ConfigOverrider.Stop
def __enter__(self):
    self.old_datetime = datetime.datetime

    class FakeDateTime(object):

        def __init__(self, time_val, increment, orig_datetime):
            self.time = time_val
            self.increment = increment
            self.orig_datetime = orig_datetime

        def __getattribute__(self, name):
            try:
                return object.__getattribute__(self, name)
            except __HOLE__:
                return getattr(self.orig_datetime, name)

        def utcnow(self):  # pylint: disable=invalid-name
            self.time += self.increment
            return self.orig_datetime.utcfromtimestamp(self.time)

    datetime.datetime = FakeDateTime(self.time, self.increment,
                                     self.old_datetime)
AttributeError
dataset/ETHPy150Open google/grr/grr/lib/test_lib.py/FakeDateTimeUTC.__enter__
def _FindElement(self, selector):
    try:
        selector_type, effective_selector = selector.split("=", 1)
    except __HOLE__:
        effective_selector = selector
        selector_type = None

    if selector_type == "css":
        elems = self.driver.execute_script(
            "return $(\"" + effective_selector.replace("\"", "\\\"") + "\");")
        elems = [e for e in elems if e.is_displayed()]

        if not elems:
            raise exceptions.NoSuchElementException()
        else:
            return elems[0]

    elif selector_type == "link":
        links = self.driver.find_elements_by_partial_link_text(
            effective_selector)
        for l in links:
            if l.text.strip() == effective_selector:
                return l
        raise exceptions.NoSuchElementException()

    elif selector_type == "xpath":
        return self.driver.find_element_by_xpath(effective_selector)

    elif selector_type == "id":
        return self.driver.find_element_by_id(effective_selector)

    elif selector_type == "name":
        return self.driver.find_element_by_name(effective_selector)

    elif selector_type is None:
        if effective_selector.startswith("//"):
            return self.driver.find_element_by_xpath(effective_selector)
        else:
            return self.driver.find_element_by_id(effective_selector)
    else:
        raise RuntimeError("unknown selector type %s" % selector_type)
ValueError
dataset/ETHPy150Open google/grr/grr/lib/test_lib.py/GRRSeleniumTest._FindElement
def loadTestsFromName(self, name, module=None):
    """Load the tests named."""
    parts = name.split(".")
    try:
        test_cases = self.loadTestsFromTestCase(
            self.base_class.classes[parts[0]])
    except __HOLE__:
        raise RuntimeError("Unable to find test %r - is it registered?" % name)

    # Specifies the whole test suite.
    if len(parts) == 1:
        return self.suiteClass(test_cases)
    elif len(parts) == 2:
        cls = self.base_class.classes[parts[0]]
        return unittest.TestSuite([cls(parts[1])])
KeyError
dataset/ETHPy150Open google/grr/grr/lib/test_lib.py/GRRTestLoader.loadTestsFromName
def __init__(self, run, stdout, stderr, stdin, env=None):
    _ = env
    Popen.running_args = run
    Popen.stdout = stdout
    Popen.stderr = stderr
    Popen.stdin = stdin
    Popen.returncode = 0

    try:
        # Store the content of the executable file.
        Popen.binary = open(run[0]).read()
    except __HOLE__:
        Popen.binary = None
IOError
dataset/ETHPy150Open google/grr/grr/lib/test_lib.py/Popen.__init__
def CheckFlowErrors(total_flows, token=None):
    # Check that all the flows are complete.
    for session_id in total_flows:
        try:
            flow_obj = aff4.FACTORY.Open(session_id, aff4_type="GRRFlow",
                                         mode="r", token=token)
        except __HOLE__:
            continue

        if flow_obj.state.context.state != rdf_flows.Flow.State.TERMINATED:
            if flags.FLAGS.debug:
                pdb.set_trace()
            raise RuntimeError("Flow %s completed in state %s" % (
                flow_obj.state.context.args.flow_name,
                flow_obj.state.context.state))
IOError
dataset/ETHPy150Open google/grr/grr/lib/test_lib.py/CheckFlowErrors
def GetProfileByName(self, profile_name, version="v1.0"):
    try:
        profile_data = open(os.path.join(
            config_lib.CONFIG["Test.data_dir"], "profiles", version,
            profile_name + ".gz"), "rb").read()

        self.profiles_served += 1

        return rdf_rekall_types.RekallProfile(name=profile_name,
                                              version=version,
                                              data=profile_data)
    except __HOLE__:
        return None
IOError
dataset/ETHPy150Open google/grr/grr/lib/test_lib.py/TestRekallRepositoryProfileServer.GetProfileByName
def get_prog():
    try:
        if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
            return "%s -m pip" % sys.executable
    except (__HOLE__, TypeError, IndexError):
        pass
    return 'pip'
AttributeError
dataset/ETHPy150Open balanced/status.balancedpayments.com/venv/lib/python2.7/site-packages/pip-1.3.1-py2.7.egg/pip/util.py/get_prog
def renames(old, new):
    """Like os.renames(), but handles renaming across devices."""
    # Implementation borrowed from os.renames().
    head, tail = os.path.split(new)
    if head and tail and not os.path.exists(head):
        os.makedirs(head)

    shutil.move(old, new)

    head, tail = os.path.split(old)
    if head and tail:
        try:
            os.removedirs(head)
        except __HOLE__:
            pass
OSError
dataset/ETHPy150Open balanced/status.balancedpayments.com/venv/lib/python2.7/site-packages/pip-1.3.1-py2.7.egg/pip/util.py/renames
def untar_file(filename, location):
    """Untar the file (tar file located at filename) to the destination
    location."""
    if not os.path.exists(location):
        os.makedirs(location)
    if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
        mode = 'r:gz'
    elif filename.lower().endswith('.bz2') or filename.lower().endswith('.tbz'):
        mode = 'r:bz2'
    elif filename.lower().endswith('.tar'):
        mode = 'r'
    else:
        logger.warn('Cannot determine compression type for file %s' % filename)
        mode = 'r:*'
    tar = tarfile.open(filename, mode)
    try:
        # note: python<=2.5 doesn't seem to know about pax headers, filter them
        leading = has_leading_dir([
            member.name for member in tar.getmembers()
            if member.name != 'pax_global_header'
        ])
        for member in tar.getmembers():
            fn = member.name
            if fn == 'pax_global_header':
                continue
            if leading:
                fn = split_leading_dir(fn)[1]
            path = os.path.join(location, fn)
            if member.isdir():
                if not os.path.exists(path):
                    os.makedirs(path)
            elif member.issym():
                try:
                    tar._extract_member(member, path)
                except:
                    e = sys.exc_info()[1]
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warn(
                        'In the tar file %s the member %s is invalid: %s'
                        % (filename, member.name, e))
                    continue
            else:
                try:
                    fp = tar.extractfile(member)
                except (__HOLE__, AttributeError):
                    e = sys.exc_info()[1]
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warn(
                        'In the tar file %s the member %s is invalid: %s'
                        % (filename, member.name, e))
                    continue
                if not os.path.exists(os.path.dirname(path)):
                    os.makedirs(os.path.dirname(path))
                destfp = open(path, 'wb')
                try:
                    shutil.copyfileobj(fp, destfp)
                finally:
                    destfp.close()
                fp.close()
    finally:
        tar.close()
KeyError
dataset/ETHPy150Open balanced/status.balancedpayments.com/venv/lib/python2.7/site-packages/pip-1.3.1-py2.7.egg/pip/util.py/untar_file
def ReadMatrix(file, separator="\t", numeric_type=numpy.float, take="all",
               headers=False):
    """read a matrix.

    There probably is a routine for this in Numpy, which I haven't found yet.
    """
    lines = filter(lambda x: x[0] != "#", file.readlines())

    row_headers, col_headers = [], []

    if headers:
        col_headers = lines[0][:-1].split("\t")[1:]
        del lines[0]

    num_rows = len(lines)

    if take != "all":
        num_cols = len(take)
    else:
        l = len(string.split(lines[0][:-1], "\t"))
        if headers:
            take = range(1, l)
        else:
            take = range(0, l)
        num_cols = len(take)

    matrix = numpy.zeros((num_rows, num_cols), numeric_type)

    nrow = 0
    for l in lines:
        data = l[:-1].split("\t")
        if headers:
            row_headers.append(data[0])

        try:
            data = map(lambda x: float(data[x]), take)
        except __HOLE__:
            print "error parsing data", data
            raise

        matrix[nrow] = data
        nrow += 1

    return matrix, row_headers, col_headers
ValueError
dataset/ETHPy150Open CGATOxford/cgat/CGAT/MatlabTools.py/ReadMatrix
def readMatrix(infile,
               format="full",
               separator="\t",
               numeric_type=numpy.float,
               take="all",
               headers=True,
               missing=None,
               ):
    """read a matrix from file and return a numpy matrix.

    formats accepted are:
    * full
    * sparse
    * phylip
    """

    row_headers, col_headers = [], []

    lines = filter(lambda x: x[0] != "#", infile.readlines())

    if len(lines) == 0:
        raise IOError("no input")

    if format == "full":

        if headers:
            col_headers = lines[0][:-1].split("\t")[1:]
            del lines[0]

        num_rows = len(lines)

        if take != "all":
            num_cols = len(take)
        else:
            l = len(string.split(lines[0][:-1], "\t"))
            if headers:
                take = range(1, l)
            else:
                take = range(0, l)
            num_cols = len(take)

        matrix = numpy.zeros((num_rows, num_cols), numeric_type)

        nrow = 0
        for l in lines:
            data = l[:-1].split("\t")
            if headers:
                row_headers.append(data[0])

            if missing is None:
                try:
                    data = map(lambda x: float(data[x]), take)
                except ValueError, msg:
                    raise ValueError("error %s: data=%s" % (msg, str(data)))
                except IndexError, msg:
                    raise IndexError("error %s: data=%s" % (msg, str(data)))
            else:
                d = []
                for x in take:
                    try:
                        d.append(float(data[x]))
                    except ValueError:
                        d.append(missing)
                    except __HOLE__, msg:
                        raise IndexError(
                            "error %s: data=%s" % (msg, str(data)))
                data = d

            matrix[nrow] = data
            nrow += 1

    elif format == "phylip":
        # read in symmetric phylip matrices
        # note: they can wrap around
        if take != "all":
            raise "phylip matrix does not support take - only full matrices are processed."
        if not headers:
            raise "phylip matrix always has headers."

        num_rows = int(lines[0].strip())
        num_cols = num_rows

        matrix = numpy.zeros((num_rows, num_cols), numeric_type)
        take = range(1, num_rows)

        nrow = 0
        ncol = 0
        for l in lines[1:]:
            data = re.split("\s+", l[:-1])

            if ncol == 0:
                row_headers.append(data[0])

            try:
                data = map(float, data[1:len(data)])
            except ValueError:
                raise "parsing error in conversion to float in line %s" % l

            for x in range(len(data)):
                matrix[nrow][ncol] = data[x]
                ncol += 1

            # deal with wrapping around
            if ncol == num_cols:
                ncol = 0
                nrow += 1

        col_headers = row_headers

    return matrix, row_headers, col_headers
IndexError
dataset/ETHPy150Open CGATOxford/cgat/CGAT/MatlabTools.py/readMatrix
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--delay', type=int, default=0)
    args = parser.parse_args()

    if sys.stdin.isatty():
        parser.error('no input, pipe another btc command output into this command')
    torrents = sys.stdin.read()
    if len(torrents.strip()) == 0:
        exit(1)

    try:
        torrents = decoder.decode(torrents)
    except __HOLE__:
        error('unexpected input: %s' % torrents)

    time.sleep(args.delay)

    hashes = [t['hash'] for t in torrents]
    for h in hashes:
        client.stop_torrent(h)

    while True:
        d = list_to_dict(client.list_torrents(), 'hash')
        all_stopped = True
        for h in d:
            if h not in hashes:
                continue
            if d[h]['state'] not in ('STOPPED', 'FINISHED'):
                all_stopped = False
                break
        if all_stopped:
            break
        time.sleep(1)

    if not sys.stdout.isatty():
        d = list_to_dict(client.list_torrents(), 'hash')
        d = dict((h, d[h]) for h in hashes if h in d)
        print(encoder.encode(dict_to_list(d, 'hash')))
ValueError
dataset/ETHPy150Open bittorrent/btc/btc/btc_stop.py/main
def initlog(*allargs):
    """Write a log message, if there is a log file.

    Even though this function is called initlog(), you should always
    use log(); log is a variable that is set either to initlog
    (initially), to dolog (once the log file has been opened), or to
    nolog (when logging is disabled).

    The first argument is a format string; the remaining arguments (if
    any) are arguments to the % operator, so e.g.
        log("%s: %s", "a", "b")
    will write "a: b" to the log file, followed by a newline.

    If the global logfp is not None, it should be a file object to
    which log data is written.

    If the global logfp is None, the global logfile may be a string
    giving a filename to open, in append mode. This file should be
    world writable!!! If the file can't be opened, logging is
    silently disabled (since there is no safe place where we could
    send an error message).
    """
    global logfp, log
    if logfile and not logfp:
        try:
            logfp = open(logfile, "a")
        except __HOLE__:
            pass
    if not logfp:
        log = nolog
    else:
        log = dolog
    log(*allargs)
IOError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/cgi.py/initlog
def parse_multipart(fp, pdict):
    """Parse multipart input.

    Arguments:
    fp   : input file
    pdict: dictionary containing other parameters of content-type header

    Returns a dictionary just like parse_qs(): keys are the field names,
    each value is a list of values for that field. This is easy to use
    but not much good if you are expecting megabytes to be uploaded --
    in that case, use the FieldStorage class instead which is much more
    flexible. Note that content-type is the raw, unparsed contents of
    the content-type header.

    XXX This does not parse nested multipart parts -- use FieldStorage
    for that.

    XXX This should really be subsumed by FieldStorage altogether -- no
    point in having two implementations of the same parsing algorithm.
    Also, FieldStorage protects itself better against certain DoS attacks
    by limiting the size of the data read in one chunk. The API here
    does not support that kind of protection. This also affects parse()
    since it can call parse_multipart().
    """
    boundary = ""
    if 'boundary' in pdict:
        boundary = pdict['boundary']
    if not valid_boundary(boundary):
        raise ValueError, ('Invalid boundary in multipart form: %r'
                           % (boundary,))

    nextpart = "--" + boundary
    lastpart = "--" + boundary + "--"
    partdict = {}
    terminator = ""

    while terminator != lastpart:
        bytes = -1
        data = None
        if terminator:
            # At start of next part. Read headers first.
            headers = mimetools.Message(fp)
            clength = headers.getheader('content-length')
            if clength:
                try:
                    bytes = int(clength)
                except __HOLE__:
                    pass
            if bytes > 0:
                if maxlen and bytes > maxlen:
                    raise ValueError, 'Maximum content length exceeded'
                data = fp.read(bytes)
            else:
                data = ""
        # Read lines until end of part.
        lines = []
        while 1:
            line = fp.readline()
            if not line:
                terminator = lastpart  # End outer loop
                break
            if line[:2] == "--":
                terminator = line.strip()
                if terminator in (nextpart, lastpart):
                    break
            lines.append(line)
        # Done with part.
        if data is None:
            continue
        if bytes < 0:
            if lines:
                # Strip final line terminator
                line = lines[-1]
                if line[-2:] == "\r\n":
                    line = line[:-2]
                elif line[-1:] == "\n":
                    line = line[:-1]
                lines[-1] = line
                data = "".join(lines)
        line = headers['content-disposition']
        if not line:
            continue
        key, params = parse_header(line)
        if key != 'form-data':
            continue
        if 'name' in params:
            name = params['name']
        else:
            continue
        if name in partdict:
            partdict[name].append(data)
        else:
            partdict[name] = [data]

    return partdict
ValueError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/cgi.py/parse_multipart
def __init__(self, fp=None, headers=None, outerboundary="",
             environ=os.environ, keep_blank_values=0, strict_parsing=0):
    """Constructor. Read multipart/* until last part.

    Arguments, all optional:

    fp              : file pointer; default: sys.stdin
        (not used when the request method is GET)

    headers         : header dictionary-like object; default:
        taken from environ as per CGI spec

    outerboundary   : terminating multipart boundary
        (for internal use only)

    environ         : environment dictionary; default: os.environ

    keep_blank_values: flag indicating whether blank values in
        percent-encoded forms should be treated as blank strings.
        A true value indicates that blanks should be retained as
        blank strings. The default false value indicates that
        blank values are to be ignored and treated as if they were
        not included.

    strict_parsing: flag indicating what to do with parsing errors.
        If false (the default), errors are silently ignored.
        If true, errors raise a ValueError exception.
    """
    method = 'GET'
    self.keep_blank_values = keep_blank_values
    self.strict_parsing = strict_parsing
    if 'REQUEST_METHOD' in environ:
        method = environ['REQUEST_METHOD'].upper()
    self.qs_on_post = None
    if method == 'GET' or method == 'HEAD':
        if 'QUERY_STRING' in environ:
            qs = environ['QUERY_STRING']
        elif sys.argv[1:]:
            qs = sys.argv[1]
        else:
            qs = ""
        fp = StringIO(qs)
        if headers is None:
            headers = {'content-type':
                       "application/x-www-form-urlencoded"}
    if headers is None:
        headers = {}
        if method == 'POST':
            # Set default content-type for POST to what's traditional
            headers['content-type'] = "application/x-www-form-urlencoded"
        if 'CONTENT_TYPE' in environ:
            headers['content-type'] = environ['CONTENT_TYPE']
        if 'QUERY_STRING' in environ:
            self.qs_on_post = environ['QUERY_STRING']
        if 'CONTENT_LENGTH' in environ:
            headers['content-length'] = environ['CONTENT_LENGTH']
    self.fp = fp or sys.stdin
    self.headers = headers
    self.outerboundary = outerboundary

    # Process content-disposition header
    cdisp, pdict = "", {}
    if 'content-disposition' in self.headers:
        cdisp, pdict = parse_header(self.headers['content-disposition'])
    self.disposition = cdisp
    self.disposition_options = pdict
    self.name = None
    if 'name' in pdict:
        self.name = pdict['name']
    self.filename = None
    if 'filename' in pdict:
        self.filename = pdict['filename']

    # Process content-type header
    #
    # Honor any existing content-type header. But if there is no
    # content-type header, use some sensible defaults. Assume
    # outerboundary is "" at the outer level, but something non-false
    # inside a multi-part. The default for an inner part is text/plain,
    # but for an outer part it should be urlencoded. This should catch
    # bogus clients which erroneously forget to include a content-type
    # header.
    #
    # See below for what we do if there does exist a content-type header,
    # but it happens to be something we don't understand.
    if 'content-type' in self.headers:
        ctype, pdict = parse_header(self.headers['content-type'])
    elif self.outerboundary or method != 'POST':
        ctype, pdict = "text/plain", {}
    else:
        ctype, pdict = 'application/x-www-form-urlencoded', {}
    self.type = ctype
    self.type_options = pdict
    self.innerboundary = ""
    if 'boundary' in pdict:
        self.innerboundary = pdict['boundary']
    clen = -1
    if 'content-length' in self.headers:
        try:
            clen = int(self.headers['content-length'])
        except __HOLE__:
            pass
        if maxlen and clen > maxlen:
            raise ValueError, 'Maximum content length exceeded'
    self.length = clen

    self.list = self.file = None
    self.done = 0
    if ctype == 'application/x-www-form-urlencoded':
        self.read_urlencoded()
    elif ctype[:10] == 'multipart/':
        self.read_multi(environ, keep_blank_values, strict_parsing)
    else:
        self.read_single()
ValueError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/cgi.py/FieldStorage.__init__
def __getitem__(self, key):
    v = SvFormContentDict.__getitem__(self, key)
    if v[0] in '0123456789+-.':
        try:
            return int(v)
        except __HOLE__:
            try:
                return float(v)
            except ValueError:
                pass
    return v.strip()
ValueError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/cgi.py/InterpFormContentDict.__getitem__
def values(self):
    result = []
    for key in self.keys():
        try:
            result.append(self[key])
        except __HOLE__:
            result.append(self.dict[key])
    return result
IndexError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/cgi.py/InterpFormContentDict.values
def items(self):
    result = []
    for key in self.keys():
        try:
            result.append((key, self[key]))
        except __HOLE__:
            result.append((key, self.dict[key]))
    return result
IndexError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/cgi.py/InterpFormContentDict.items
def GetHashDigest(filename):
    """ Get the sha1 digest of `filename`."""
    try:
        fp = open(filename, mode='rb')
        digest = hashlib.sha1(fp.read()).hexdigest()
        fp.close()
        return digest
    except __HOLE__ as e:
        sys.stderr.write(str(e))
        sys.exit(1)
    return
IOError
dataset/ETHPy150Open spranesh/Redhawk/redhawk/utils/util.py/GetHashDigest
def GuessLanguage(filename):
    """ Attempts to guess the language of `filename`. Essentially, we do a
    filename.rsplit('.', 1), and a lookup into a dictionary of extensions."""
    try:
        (_, extension) = filename.rsplit('.', 1)
    except __HOLE__:
        raise ValueError("Could not guess language as '%s' does not have an "
                         "extension" % filename)
    return {'c': 'c',
            'py': 'python'}[extension]
ValueError
dataset/ETHPy150Open spranesh/Redhawk/redhawk/utils/util.py/GuessLanguage
def StartShell(local_vars, banner='', try_ipython=True):
    """ Start a shell, with the given local variables. It prints the given
    banner as a welcome message."""

    def IPythonShell(namespace, banner):
        from IPython.Shell import IPShell
        ipshell = IPShell(user_ns=namespace)
        ipshell.mainloop(banner=banner)

    def PythonShell(namespace, banner):
        import readline, rlcompleter, code
        readline.parse_and_bind("tab: complete")
        readline.set_completer(rlcompleter.Completer(namespace).complete)
        code.interact(local=namespace, banner=banner)

    if try_ipython:
        try:
            IPythonShell(local_vars, banner)
            return
        except __HOLE__ as e:
            pass
    else:
        PythonShell(local_vars, banner)
        return
ImportError
dataset/ETHPy150Open spranesh/Redhawk/redhawk/utils/util.py/StartShell
def get_from_clause(self):
    """
    Returns a list of strings that are joined together to go after the
    "FROM" part of the query, as well as a list of any extra parameters
    that need to be included. Sub-classes can override this to create a
    from-clause via a "select".

    This should only be called after any SQL construction methods that
    might change the tables we need. This means the select columns and
    ordering must be done first.
    """
    result = []
    qn = self.quote_name_unless_alias
    qn2 = self.connection.ops.quote_name
    index_map = self.query.index_map
    first = True
    from_params = []
    for alias in self.query.tables:
        if not self.query.alias_refcount[alias]:
            continue
        try:
            name, alias, join_type, lhs, join_cols, _, join_field = (
                self.query.alias_map[alias])
        except __HOLE__:
            # Extra tables can end up in self.tables, but not in the
            # alias_map if they aren't in a join. That's OK. We skip them.
            continue
        alias_str = (alias != name and ' %s' % alias or '')

        ### jbalogh wuz here. ###
        if name in index_map:
            use_index = 'USE INDEX (%s)' % qn(index_map[name])
        else:
            use_index = ''

        if join_type and not first:
            extra_cond = join_field.get_extra_restriction(
                self.query.where_class, alias, lhs)
            if extra_cond:
                extra_sql, extra_params = extra_cond.as_sql(
                    qn, self.connection)
                extra_sql = 'AND (%s)' % extra_sql
                from_params.extend(extra_params)
            else:
                extra_sql = ""
            result.append('%s %s%s %s ON ('
                          % (join_type, qn(name), alias_str, use_index))
            for index, (lhs_col, rhs_col) in enumerate(join_cols):
                if index != 0:
                    result.append(' AND ')
                result.append('%s.%s = %s.%s' %
                              (qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
            result.append('%s)' % extra_sql)
        else:
            connector = '' if first else ', '
            result.append('%s%s%s %s' % (connector, qn(name), alias_str,
                                         use_index))
        ### jbalogh out. ###

        first = False
    for t in self.query.extra_tables:
        alias, unused = self.query.table_alias(t)
        # Only add the alias if it's not already present (the table_alias()
        # call increments the refcount, so an alias refcount of one means
        # this is the only reference).
        if (alias not in self.query.alias_map or
                self.query.alias_refcount[alias] == 1):
            connector = not first and ', ' or ''
            result.append('%s%s' % (connector, qn(alias)))
            first = False
    return result, from_params
KeyError
dataset/ETHPy150Open mozilla/addons-server/src/olympia/addons/query.py/IndexCompiler.get_from_clause
def inlines(value, return_list=False):
    try:
        from BeautifulSoup import BeautifulStoneSoup
    except __HOLE__:
        from beautifulsoup import BeautifulStoneSoup

    content = BeautifulStoneSoup(
        value,
        selfClosingTags=['inline', 'img', 'br', 'input', 'meta', 'link', 'hr'])

    # Return a list of inline objects found in the value.
    if return_list:
        inline_list = []
        for inline in content.findAll('inline'):
            rendered_inline = render_inline(inline)
            inline_list.append(rendered_inline['context'])
        return inline_list

    # Replace inline markup in the value with rendered inline templates.
    else:
        for inline in content.findAll('inline'):
            rendered_inline = render_inline(inline)
            if rendered_inline:
                inline_template = render_to_string(
                    rendered_inline['template'], rendered_inline['context'])
            else:
                inline_template = ''
            value = value.replace(str(inline), inline_template)
        return mark_safe(unicode(value))
ImportError
dataset/ETHPy150Open pigmonkey/django-inlineobjects/inlines/parser.py/inlines
def render_inline(inline):
    """
    Replace inline markup with template markup that matches the
    appropriate app and model.
    """
    # Look for inline type, 'app.model'
    try:
        app_label, model_name = inline['type'].split('.')
    except:
        if settings.DEBUG:
            raise TemplateSyntaxError("Couldn't find the attribute 'type' in "
                                      "the <inline> tag.")
        else:
            return ''

    # Look for content type
    try:
        content_type = ContentType.objects.get(app_label=app_label,
                                               model=model_name)
        model = content_type.model_class()
    except ContentType.DoesNotExist:
        if settings.DEBUG:
            raise TemplateSyntaxError("Inline ContentType not found.")
        else:
            return ''

    # Create the context with all the attributes in the inline markup.
    context = dict((attr[0], attr[1]) for attr in inline.attrs)

    # If multiple IDs were specified, build a list of all requested objects
    # and add them to the context.
    try:
        try:
            id_list = [int(i) for i in inline['ids'].split(',')]
            obj_list = model.objects.in_bulk(id_list)
            obj_list = list(obj_list[int(i)] for i in id_list)
            context['object_list'] = obj_list
        except ValueError:
            if settings.DEBUG:
                raise ValueError("The <inline> ids attribute is missing or "
                                 "invalid.")
            else:
                return ''
    # If only one ID was specified, retrieve the requested object and add it
    # to the context.
    except __HOLE__:
        try:
            obj = model.objects.get(pk=inline['id'])
            context['object'] = obj
            context['settings'] = settings
        except model.DoesNotExist:
            if settings.DEBUG:
                raise model.DoesNotExist("%s with pk of '%s' does not exist"
                                         % (model_name, inline['id']))
            else:
                return ''
        except:
            if settings.DEBUG:
                raise TemplateSyntaxError("The <inline> id attribute is "
                                          "missing or invalid.")
            else:
                return ''

    # Set the name of the template that should be used to render the inline.
    template = ["inlines/%s_%s.html" % (app_label, model_name),
                "inlines/default.html"]

    # Return the template name and the context.
    return {'template': template, 'context': context}
KeyError
dataset/ETHPy150Open pigmonkey/django-inlineobjects/inlines/parser.py/render_inline
def get_user_model():
    """
    Get the user model that is being used. If the `get_user_model` method is
    not available, default back to the standard User model provided through
    `django.contrib.auth`.
    """
    try:
        from django.contrib.auth import get_user_model
        return get_user_model()
    except __HOLE__:
        from django.contrib.auth.models import User
        return User
ImportError
dataset/ETHPy150Open Rediker-Software/doac/doac/compat.py/get_user_model
def test_bad_traceback(self):
    result = "JJackson's SSN: 555-55-5555"
    try:
        # copied from couchdbkit/client.py
        assert isinstance(result, dict), 'received an invalid ' \
            'response of type %s: %s' % (type(result), repr(result))
    except __HOLE__ as e:
        pass
    self.assertIn(result, e.message)
    self.assertNotIn(result, clean_exception(e).message)
AssertionError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/util/tests/test_log.py/TestLogging.test_bad_traceback
def __init__(self, *args, **kwds):
    if len(args) > 1:
        raise TypeError('expected at most 1 arguments, got %d' % len(args))
    try:
        self.__end
    except __HOLE__:
        self.clear()
    self.update(*args, **kwds)
AttributeError
dataset/ETHPy150Open timothycrosley/jiphy/jiphy/pie_slice.py/OrderedDict.__init__
def load_config_file(self, suppress_errors=True):
    """Load the config file.

    By default, errors in loading config are handled, and a warning
    printed on screen. For testing, the suppress_errors option is set
    to False, so errors will make tests fail.
    """
    base_config = 'ipython_config.py'
    self.log.debug("Attempting to load config file: %s" % base_config)
    try:
        Application.load_config_file(
            self,
            base_config,
            path=self.config_file_paths
        )
    except IOError:
        # ignore errors loading parent
        pass
    if self.config_file_name == base_config:
        # don't load secondary config
        return
    self.log.debug("Attempting to load config file: %s" %
                   self.config_file_name)
    try:
        Application.load_config_file(
            self,
            self.config_file_name,
            path=self.config_file_paths
        )
    except __HOLE__:
        # Only warn if the default config file was NOT being used.
        if self.config_file_specified:
            self.log.warn("Config file not found, skipping: %s" %
                          self.config_file_name)
    except:
        # For testing purposes.
        if not suppress_errors:
            raise
        self.log.warn("Error loading config file: %s" %
                      self.config_file_name, exc_info=True)
IOError
dataset/ETHPy150Open ipython/ipython-py3k/IPython/core/application.py/BaseIPythonApplication.load_config_file
def init_profile_dir(self): """initialize the profile dir""" try: # location explicitly specified: location = self.config.ProfileDir.location except __HOLE__: # location not specified, find by profile name try: p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config) except ProfileDirError: # not found, maybe create it (always create default profile) if self.auto_create or self.profile=='python3': try: p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config) except ProfileDirError: self.log.fatal("Could not create profile: %r"%self.profile) self.exit(1) else: self.log.info("Created profile dir: %r"%p.location) else: self.log.fatal("Profile %r not found."%self.profile) self.exit(1) else: self.log.info("Using existing profile dir: %r"%p.location) else: # location is fully specified try: p = ProfileDir.find_profile_dir(location, self.config) except ProfileDirError: # not found, maybe create it if self.auto_create: try: p = ProfileDir.create_profile_dir(location, self.config) except ProfileDirError: self.log.fatal("Could not create profile directory: %r"%location) self.exit(1) else: self.log.info("Creating new profile dir: %r"%location) else: self.log.fatal("Profile directory %r not found."%location) self.exit(1) else: self.log.info("Using existing profile dir: %r"%location) self.profile_dir = p self.config_file_paths.append(p.location)
AttributeError
dataset/ETHPy150Open ipython/ipython-py3k/IPython/core/application.py/BaseIPythonApplication.init_profile_dir
@property def env(self): raw_env = self.settings['raw_env'].get() env = {} if not raw_env: return env for e in raw_env: s = _compat.bytes_to_str(e) try: k, v = s.split('=', 1) except __HOLE__: raise RuntimeError("environment setting %r invalid" % s) env[k] = v return env
ValueError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/gunicorn/config.py/Config.env
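A small sketch of why the hole above is ValueError: unpacking the result of str.split('=', 1) fails only when the string contains no '='.

for s in ('FOO=bar', 'FOO=a=b', 'FOO'):
    try:
        k, v = s.split('=', 1)
        print((k, v))            # 'FOO=a=b' keeps the second '=' in the value
    except ValueError:
        print('invalid environment setting: %r' % s)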
def validate_callable(arity): def _validate_callable(val): if isinstance(val, six.string_types): try: mod_name, obj_name = val.rsplit(".", 1) except __HOLE__: raise TypeError("Value '%s' is not import string. " "Format: module[.submodules...].object" % val) try: mod = __import__(mod_name, fromlist=[obj_name]) val = getattr(mod, obj_name) except ImportError as e: raise TypeError(str(e)) except AttributeError: raise TypeError("Can not load '%s' from '%s'" "" % (obj_name, mod_name)) if not six.callable(val): raise TypeError("Value is not six.callable: %s" % val) if arity != -1 and arity != len(inspect.getargspec(val)[0]): raise TypeError("Value must have an arity of: %s" % arity) return val return _validate_callable
ValueError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/gunicorn/config.py/validate_callable
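A sketch of the dotted-path resolution the validator above performs; os.path.join is an arbitrary stdlib target chosen for the demo.

val = 'os.path.join'
mod_name, obj_name = val.rsplit('.', 1)
mod = __import__(mod_name, fromlist=[obj_name])  # returns os.path
func = getattr(mod, obj_name)
print(func('tmp', 'file.txt'))   # tmp/file.txt on POSIX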
def validate_user(val): if val is None: return os.geteuid() if isinstance(val, int): return val elif val.isdigit(): return int(val) else: try: return pwd.getpwnam(val).pw_uid except __HOLE__: raise ConfigError("No such user: '%s'" % val)
KeyError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/gunicorn/config.py/validate_user
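A sketch of the lookup behind the hole above (assumes a Unix host, where the stdlib pwd module is available): getpwnam raises KeyError for unknown user names.

import pwd
try:
    print(pwd.getpwnam('no_such_user_42').pw_uid)
except KeyError:
    print('no such user')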
def validate_group(val): if val is None: return os.getegid() if isinstance(val, int): return val elif val.isdigit(): return int(val) else: try: return grp.getgrnam(val).gr_gid except __HOLE__: raise ConfigError("No such group: '%s'" % val)
KeyError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/gunicorn/config.py/validate_group
def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0, link=None, verbose=1, dry_run=0): """Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is copied there with the same name; otherwise, it must be a filename. (If the file exists, it will be ruthlessly clobbered.) If 'preserve_mode' is true (the default), the file's mode (type and permission bits, or whatever is analogous on the current platform) is copied. If 'preserve_times' is true (the default), the last-modified and last-access times are copied as well. If 'update' is true, 'src' will only be copied if 'dst' does not exist, or if 'dst' does exist but is older than 'src'. 'link' allows you to make hard links (os.link) or symbolic links (os.symlink) instead of copying: set it to "hard" or "sym"; if it is None (the default), files are copied. Don't set 'link' on systems that don't support it: 'copy_file()' doesn't check if hard or symbolic linking is available. Under Mac OS, uses the native file copy function in macostools; on other systems, uses '_copy_file_contents()' to copy file contents. Return a tuple (dest_name, copied): 'dest_name' is the actual name of the output file, and 'copied' is true if the file was copied (or would have been copied, if 'dry_run' true). """ # XXX if the destination file already exists, we clobber it if # copying, but blow up if linking. Hmmm. And I don't know what # macostools.copyfile() does. Should definitely be consistent, and # should probably blow up if destination exists and we would be # changing it (ie. it's not already a hard/soft link to src OR # (not update) and (src newer than dst). from distutils.dep_util import newer from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE if not os.path.isfile(src): raise DistutilsFileError( "can't copy '%s': doesn't exist or not a regular file" % src) if os.path.isdir(dst): dir = dst dst = os.path.join(dst, os.path.basename(src)) else: dir = os.path.dirname(dst) if update and not newer(src, dst): if verbose >= 1: log.debug("not copying %s (output up-to-date)", src) return dst, 0 try: action = _copy_action[link] except __HOLE__: raise ValueError("invalid value '%s' for 'link' argument" % link) if verbose >= 1: if os.path.basename(dst) == os.path.basename(src): log.info("%s %s -> %s", action, src, dir) else: log.info("%s %s -> %s", action, src, dst) if dry_run: return (dst, 1) # If linking (hard or symbolic), use the appropriate system call # (Unix only, of course, but that's the caller's responsibility) if link == 'hard': if not (os.path.exists(dst) and os.path.samefile(src, dst)): os.link(src, dst) elif link == 'sym': if not (os.path.exists(dst) and os.path.samefile(src, dst)): os.symlink(src, dst) # Otherwise (non-Mac, not linking), copy the file contents and # (optionally) copy the times and mode. else: _copy_file_contents(src, dst) if preserve_mode or preserve_times: st = os.stat(src) # According to David Ascher <[email protected]>, utime() should be done # before chmod() (at least under NT). if preserve_times: os.utime(dst, (st[ST_ATIME], st[ST_MTIME])) if preserve_mode: os.chmod(dst, S_IMODE(st[ST_MODE])) return (dst, 1) # XXX I suspect this is Unix-specific -- need porting help!
KeyError
dataset/ETHPy150Open ctxis/canape/CANAPE.Scripting/Lib/distutils/file_util.py/copy_file
def main(): """ azurectl - invoke the Application """ docopt.__dict__['extras'] = extras logger.init() try: App() except AzureError as e: # known exception, log information and exit logger.log.error('%s: %s', type(e).__name__, format(e)) sys.exit(1) except docopt.DocoptExit as e: # exception caught by docopt, results in usage message usage(e) sys.exit(1) except __HOLE__: # user exception, program aborted by user sys.exit(1) except Exception: # exception we did not expect, show python backtrace logger.log.error('Unexpected error:') raise
SystemExit
dataset/ETHPy150Open SUSE/azurectl/azurectl/azurectl.py/main
def _delete_conntrack_state(self, device_info_list, rule, remote_ip=None): conntrack_cmds = self._get_conntrack_cmds(device_info_list, rule, remote_ip) for cmd in conntrack_cmds: try: self.execute(list(cmd), run_as_root=True, check_exit_code=True, extra_ok_codes=[1]) except __HOLE__: LOG.exception( _LE("Failed execute conntrack command %s"), str(cmd))
RuntimeError
dataset/ETHPy150Open openstack/neutron/neutron/agent/linux/ip_conntrack.py/IpConntrackManager._delete_conntrack_state
def _to_int_with_arithmetics(self, item): item = str(item) try: return int(item) except __HOLE__: return int(eval(item))
ValueError
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/running/keywords.py/ForLoop._to_int_with_arithmetics
def get_all_layers(layer, treat_as_input=None): """ This function gathers all layers below one or more given :class:`Layer` instances, including the given layer(s). Its main use is to collect all layers of a network just given the output layer(s). The layers are guaranteed to be returned in a topological order: a layer in the result list is always preceded by all layers its input depends on. Parameters ---------- layer : Layer or list the :class:`Layer` instance for which to gather all layers feeding into it, or a list of :class:`Layer` instances. treat_as_input : None or iterable an iterable of :class:`Layer` instances to treat as input layers with no layers feeding into them. They will show up in the result list, but their incoming layers will not be collected (unless they are required for other layers as well). Returns ------- list a list of :class:`Layer` instances feeding into the given instance(s) either directly or indirectly, and the given instance(s) themselves, in topological order. Examples -------- >>> from lasagne.layers import InputLayer, DenseLayer >>> l_in = InputLayer((100, 20)) >>> l1 = DenseLayer(l_in, num_units=50) >>> get_all_layers(l1) == [l_in, l1] True >>> l2 = DenseLayer(l_in, num_units=10) >>> get_all_layers([l2, l1]) == [l_in, l2, l1] True >>> get_all_layers([l1, l2]) == [l_in, l1, l2] True >>> l3 = DenseLayer(l2, num_units=20) >>> get_all_layers(l3) == [l_in, l2, l3] True >>> get_all_layers(l3, treat_as_input=[l2]) == [l2, l3] True """ # We perform a depth-first search. We add a layer to the result list only # after adding all its incoming layers (if any) or when detecting a cycle. # We use a LIFO stack to avoid ever running into recursion depth limits. try: queue = deque(layer) except __HOLE__: queue = deque([layer]) seen = set() done = set() result = [] # If treat_as_input is given, we pretend we've already collected all their # incoming layers. if treat_as_input is not None: seen.update(treat_as_input) while queue: # Peek at the leftmost node in the queue. layer = queue[0] if layer is None: # Some node had an input_layer set to `None`. Just ignore it. queue.popleft() elif layer not in seen: # We haven't seen this node yet: Mark it and queue all incomings # to be processed first. If there are no incomings, the node will # be appended to the result list in the next iteration. seen.add(layer) if hasattr(layer, 'input_layers'): queue.extendleft(reversed(layer.input_layers)) elif hasattr(layer, 'input_layer'): queue.appendleft(layer.input_layer) else: # We've been here before: Either we've finished all its incomings, # or we've detected a cycle. In both cases, we remove the layer # from the queue and append it to the result list. queue.popleft() if layer not in done: result.append(layer) done.add(layer) return result
TypeError
dataset/ETHPy150Open Lasagne/Lasagne/lasagne/layers/helper.py/get_all_layers
def get_output(layer_or_layers, inputs=None, **kwargs): """ Computes the output of the network at one or more given layers. Optionally, you can define the input(s) to propagate through the network instead of using the input variable(s) associated with the network's input layer(s). Parameters ---------- layer_or_layers : Layer or list the :class:`Layer` instance for which to compute the output expressions, or a list of :class:`Layer` instances. inputs : None, Theano expression, numpy array, or dict If None, uses the input variables associated with the :class:`InputLayer` instances. If a Theano expression, this defines the input for a single :class:`InputLayer` instance. Will throw a ValueError if there are multiple :class:`InputLayer` instances. If a numpy array, this will be wrapped as a Theano constant and used just like a Theano expression. If a dictionary, any :class:`Layer` instance (including the input layers) can be mapped to a Theano expression or numpy array to use instead of its regular output. Returns ------- output : Theano expression or list the output of the given layer(s) for the given network input Notes ----- Depending on your network architecture, `get_output([l1, l2])` may be crucially different from `[get_output(l1), get_output(l2)]`. Only the former ensures that the output expressions depend on the same intermediate expressions. For example, when `l1` and `l2` depend on a common dropout layer, the former will use the same dropout mask for both, while the latter will use two different dropout masks. """ from .input import InputLayer from .base import MergeLayer # track accepted kwargs used by get_output_for accepted_kwargs = {'deterministic'} # obtain topological ordering of all layers the output layer(s) depend on treat_as_input = inputs.keys() if isinstance(inputs, dict) else [] all_layers = get_all_layers(layer_or_layers, treat_as_input) # initialize layer-to-expression mapping from all input layers all_outputs = dict((layer, layer.input_var) for layer in all_layers if isinstance(layer, InputLayer) and layer not in treat_as_input) # update layer-to-expression mapping from given input(s), if any if isinstance(inputs, dict): all_outputs.update((layer, utils.as_theano_expression(expr)) for layer, expr in inputs.items()) elif inputs is not None: if len(all_outputs) > 1: raise ValueError("get_output() was called with a single input " "expression on a network with multiple input " "layers. Please call it with a dictionary of " "input expressions instead.") for input_layer in all_outputs: all_outputs[input_layer] = utils.as_theano_expression(inputs) # update layer-to-expression mapping by propagating the inputs for layer in all_layers: if layer not in all_outputs: try: if isinstance(layer, MergeLayer): layer_inputs = [all_outputs[input_layer] for input_layer in layer.input_layers] else: layer_inputs = all_outputs[layer.input_layer] except KeyError: # one of the input_layer attributes must have been `None` raise ValueError("get_output() was called without giving an " "input expression for the free-floating " "layer %r. Please call it with a dictionary " "mapping this layer to an input expression." 
% layer) all_outputs[layer] = layer.get_output_for(layer_inputs, **kwargs) try: names, _, _, defaults = getargspec(layer.get_output_for) except TypeError: # If introspection is not possible, skip it pass else: if defaults is not None: accepted_kwargs |= set(names[-len(defaults):]) accepted_kwargs |= set(layer.get_output_kwargs) unused_kwargs = set(kwargs.keys()) - accepted_kwargs if unused_kwargs: suggestions = [] for kwarg in unused_kwargs: suggestion = get_close_matches(kwarg, accepted_kwargs) if suggestion: suggestions.append('%s (perhaps you meant %s)' % (kwarg, suggestion[0])) else: suggestions.append(kwarg) warn("get_output() was called with unused kwargs:\n\t%s" % "\n\t".join(suggestions)) # return the output(s) of the requested layer(s) only try: return [all_outputs[layer] for layer in layer_or_layers] except __HOLE__: return all_outputs[layer_or_layers]
TypeError
dataset/ETHPy150Open Lasagne/Lasagne/lasagne/layers/helper.py/get_output
def get_output_shape(layer_or_layers, input_shapes=None): """ Computes the output shape of the network at one or more given layers. Parameters ---------- layer_or_layers : Layer or list the :class:`Layer` instance for which to compute the output shapes, or a list of :class:`Layer` instances. input_shapes : None, tuple, or dict If None, uses the input shapes associated with the :class:`InputLayer` instances. If a tuple, this defines the input shape for a single :class:`InputLayer` instance. Will throw a ValueError if there are multiple :class:`InputLayer` instances. If a dictionary, any :class:`Layer` instance (including the input layers) can be mapped to a shape tuple to use instead of its regular output shape. Returns ------- tuple or list the output shape of the given layer(s) for the given network input """ # shortcut: return precomputed shapes if we do not need to propagate any if input_shapes is None or input_shapes == {}: try: return [layer.output_shape for layer in layer_or_layers] except __HOLE__: return layer_or_layers.output_shape from .input import InputLayer from .base import MergeLayer # obtain topological ordering of all layers the output layer(s) depend on if isinstance(input_shapes, dict): treat_as_input = input_shapes.keys() else: treat_as_input = [] all_layers = get_all_layers(layer_or_layers, treat_as_input) # initialize layer-to-shape mapping from all input layers all_shapes = dict((layer, layer.shape) for layer in all_layers if isinstance(layer, InputLayer) and layer not in treat_as_input) # update layer-to-shape mapping from given input(s), if any if isinstance(input_shapes, dict): all_shapes.update(input_shapes) elif input_shapes is not None: if len(all_shapes) > 1: raise ValueError("get_output_shape() was called with a single " "input shape on a network with multiple input " "layers. Please call it with a dictionary of " "input shapes instead.") for input_layer in all_shapes: all_shapes[input_layer] = input_shapes # update layer-to-shape mapping by propagating the input shapes for layer in all_layers: if layer not in all_shapes: if isinstance(layer, MergeLayer): input_shapes = [all_shapes[input_layer] for input_layer in layer.input_layers] else: input_shapes = all_shapes[layer.input_layer] all_shapes[layer] = layer.get_output_shape_for(input_shapes) # return the output shape(s) of the requested layer(s) only try: return [all_shapes[layer] for layer in layer_or_layers] except TypeError: return all_shapes[layer_or_layers]
TypeError
dataset/ETHPy150Open Lasagne/Lasagne/lasagne/layers/helper.py/get_output_shape
def main(): #args = GetArgs() try: si = None try: print "Trying to connect to VCENTER SERVER . . ." si = connect.Connect(inputs['vcenter_ip'], 443, inputs['vcenter_user'], inputs['vcenter_password']) except __HOLE__, e: pass atexit.register(Disconnect, si) print "Connected to VCENTER SERVER !" content = si.RetrieveContent() #vm_name = args.vm vm_name = inputs['vm_name'] vm = get_obj(content, [vim.VirtualMachine], vm_name) if vm.runtime.powerState != 'poweredOff': print "WARNING: Power off your VM before reconfigure" sys.exit() adaptermap = vim.vm.customization.AdapterMapping() globalip = vim.vm.customization.GlobalIPSettings() adaptermap.adapter = vim.vm.customization.IPSettings() isDHCP = inputs['isDHCP'] if not isDHCP: """Static IP Configuration""" adaptermap.adapter.ip = vim.vm.customization.FixedIp() adaptermap.adapter.ip.ipAddress = inputs['vm_ip'] adaptermap.adapter.subnetMask = inputs['subnet'] adaptermap.adapter.gateway = inputs['gateway'] globalip.dnsServerList = inputs['dns'] else: """DHCP Configuration""" adaptermap.adapter.ip = vim.vm.customization.DhcpIpGenerator() adaptermap.adapter.dnsDomain = inputs['domain'] globalip = vim.vm.customization.GlobalIPSettings() #For Linux. For Windows follow sysprep ident = vim.vm.customization.LinuxPrep(domain=inputs['domain'], hostName=vim.vm.customization.FixedName(name=vm_name)) customspec = vim.vm.customization.Specification() #For only one adapter customspec.identity = ident customspec.nicSettingMap = [adaptermap] customspec.globalIPSettings = globalip #Configuring network for a single NIC #For multiple NIC configuration contact me. print "Reconfiguring VM Networks . . ." task = vm.Customize(spec=customspec) # Wait for Network Reconfigure to complete wait_for_task(task, si) except vmodl.MethodFault, e: print "Caught vmodl fault: %s" % e.msg return 1 except Exception, e: print "Caught exception: %s" % str(e) return 1 # Start program
IOError
dataset/ETHPy150Open rreubenur/vmware-pyvmomi-examples/network_configure.py/main
def loadTagDict(dirPath): d = {} try: files = os.listdir(dirPath) except __HOLE__: return {} for path in files: # ignore hidden files if path.startswith('.'): continue c = TagFile(os.path.join(dirPath, path)) d[c.tag] = c return d
OSError
dataset/ETHPy150Open sassoftware/conary/conary/build/tags.py/loadTagDict
def get_sd_auth(val, sd_auth_pillar_name='serverdensity'): ''' Returns requested Server Density authentication value from pillar. CLI Example: .. code-block:: bash salt '*' serverdensity_device.get_sd_auth <val> ''' sd_pillar = __pillar__.get(sd_auth_pillar_name) log.debug('Server Density Pillar: {0}'.format(sd_pillar)) if not sd_pillar: log.error('Could not load {0} pillar'.format(sd_auth_pillar_name)) raise CommandExecutionError( '{0} pillar is required for authentication'.format(sd_auth_pillar_name) ) try: return sd_pillar[val] except __HOLE__: log.error('Could not find value {0} in pillar'.format(val)) raise CommandExecutionError('{0} value was not found in pillar'.format(val))
KeyError
dataset/ETHPy150Open saltstack/salt/salt/modules/serverdensity_device.py/get_sd_auth
def create(name, **params): ''' Function to create device in Server Density. For more info, see the `API docs`__. .. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating CLI Example: .. code-block:: bash salt '*' serverdensity_device.create lama salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768 ''' log.debug('Server Density params: {0}'.format(params)) params = _clean_salt_variables(params) params['name'] = name api_response = requests.post( 'https://api.serverdensity.io/inventory/devices/', params={'token': get_sd_auth('api_token')}, data=params ) log.debug('Server Density API Response: {0}'.format(api_response)) log.debug('Server Density API Response content: {0}'.format(api_response.content)) if api_response.status_code == 200: try: return json.loads(api_response.content) except __HOLE__: log.error('Could not parse API Response content: {0}'.format(api_response.content)) raise CommandExecutionError( 'Failed to create, API Response: {0}'.format(api_response) ) else: return None
ValueError
dataset/ETHPy150Open saltstack/salt/salt/modules/serverdensity_device.py/create
def delete(device_id): ''' Delete a device from Server Density. For more information, see the `API docs`__. .. __: https://apidocs.serverdensity.com/Inventory/Devices/Deleting CLI Example: .. code-block:: bash salt '*' serverdensity_device.delete 51f7eafcdba4bb235e000ae4 ''' api_response = requests.delete( 'https://api.serverdensity.io/inventory/devices/' + device_id, params={'token': get_sd_auth('api_token')} ) log.debug('Server Density API Response: {0}'.format(api_response)) log.debug('Server Density API Response content: {0}'.format(api_response.content)) if api_response.status_code == 200: try: return json.loads(api_response.content) except __HOLE__: log.error('Could not parse API Response content: {0}'.format(api_response.content)) raise CommandExecutionError( 'Failed to delete, API Response: {0}'.format(api_response) ) else: return None
ValueError
dataset/ETHPy150Open saltstack/salt/salt/modules/serverdensity_device.py/delete
def ls(**params): ''' List devices in Server Density. Results will be filtered by any params passed to this function. For more information, see the API docs on listing_ and searching_. .. _listing: https://apidocs.serverdensity.com/Inventory/Devices/Listing .. _searching: https://apidocs.serverdensity.com/Inventory/Devices/Searching CLI Example: .. code-block:: bash salt '*' serverdensity_device.ls salt '*' serverdensity_device.ls name=lama salt '*' serverdensity_device.ls name=lama group=lama_band installedRAM=32768 ''' params = _clean_salt_variables(params) endpoint = 'devices' # Change endpoint if there are params to filter by: if params: endpoint = 'resources' # Convert all ints to strings: for key, val in six.iteritems(params): params[key] = str(val) api_response = requests.get( 'https://api.serverdensity.io/inventory/{0}'.format(endpoint), params={'token': get_sd_auth('api_token'), 'filter': json.dumps(params)} ) log.debug('Server Density API Response: {0}'.format(api_response)) log.debug('Server Density API Response content: {0}'.format(api_response.content)) if api_response.status_code == 200: try: return json.loads(api_response.content) except __HOLE__: log.error( 'Could not parse Server Density API Response content: {0}' .format(api_response.content) ) raise CommandExecutionError( 'Failed to list, Server Density API Response: {0}' .format(api_response) ) else: return None
ValueError
dataset/ETHPy150Open saltstack/salt/salt/modules/serverdensity_device.py/ls
def update(device_id, **params): ''' Updates device information in Server Density. For more information see the `API docs`__. .. __: https://apidocs.serverdensity.com/Inventory/Devices/Updating CLI Example: .. code-block:: bash salt '*' serverdensity_device.update 51f7eafcdba4bb235e000ae4 name=lama group=lama_band salt '*' serverdensity_device.update 51f7eafcdba4bb235e000ae4 name=better_lama group=rock_lamas swapSpace=512 ''' params = _clean_salt_variables(params) api_response = requests.put( 'https://api.serverdensity.io/inventory/devices/' + device_id, params={'token': get_sd_auth('api_token')}, data=params ) log.debug('Server Density API Response: {0}'.format(api_response)) log.debug('Server Density API Response content: {0}'.format(api_response.content)) if api_response.status_code == 200: try: return json.loads(api_response.content) except __HOLE__: log.error( 'Could not parse Server Density API Response content: {0}' .format(api_response.content) ) raise CommandExecutionError( 'Failed to update, API Response: {0}'.format(api_response) ) else: return None
ValueError
dataset/ETHPy150Open saltstack/salt/salt/modules/serverdensity_device.py/update
def get_win_certfile(): global _wincerts if _wincerts is not None: return _wincerts.name try: from wincertstore import CertFile except __HOLE__: return None class MyCertFile(CertFile): def __init__(self, stores=(), certs=()): CertFile.__init__(self) for store in stores: self.addstore(store) self.addcerts(certs) atexit.register(self.close) _wincerts = MyCertFile(stores=['CA', 'ROOT']) return _wincerts.name
ImportError
dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/setuptools/ssl_support.py/get_win_certfile
def find_ca_bundle(): """Return an existing CA bundle path, or None""" if os.name=='nt': return get_win_certfile() else: for cert_path in cert_paths: if os.path.isfile(cert_path): return cert_path try: return pkg_resources.resource_filename('certifi', 'cacert.pem') except (__HOLE__, ResolutionError, ExtractionError): return None
ImportError
dataset/ETHPy150Open GeekTrainer/Flask/Work/Trivia - Module 5/env/Lib/site-packages/setuptools/ssl_support.py/find_ca_bundle
def profileFormulaMenuCommand(cntlr): # save DTS menu item has been invoked if cntlr.modelManager is None or cntlr.modelManager.modelXbrl is None: cntlr.addToLog("No taxonomy loaded.") return # get file name into which to save log file while in foreground thread profileReportFile = cntlr.uiFileDialog("save", title=_("arelle - Save Formula Profile Report"), initialdir=cntlr.config.setdefault("formulaProfileReportDir","."), filetypes=[(_("Profile report file .log"), "*.log")], defaultextension=".log") if not profileReportFile: return False errMsg = "" maxRunTime = 0 while (1): timeout = simpledialog.askstring(_("arelle - Set formula run time limit"), _("{0}You may enter the maximum number of minutes to run formulas.\n" "(Leave empty for no run time limitation.)".format(errMsg)), parent=cntlr.parent) if timeout: try: maxRunTime = float(timeout) break except __HOLE__ as err: errMsg = str(err) + "\n\n" excludeCompileTime = messagebox.askyesno(_("arelle - Exclude formula compile statistics"), _("Should formula compiling be excluded from the statistics?\n" "(Yes will make a separate compiling \"pass\" so that statistics include execution only.)".format(errMsg)), parent=cntlr.parent) cntlr.config["formulaProfileReportDir"] = os.path.dirname(profileReportFile) cntlr.saveConfig() # perform validation and profiling on background thread import threading thread = threading.Thread(target=lambda c=cntlr, f=profileReportFile, t=maxRunTime, e=excludeCompileTime: backgroundProfileFormula(c,f,t,e)) thread.daemon = True thread.start()
ValueError
dataset/ETHPy150Open Arelle/Arelle/arelle/plugin/profileFormula.py/profileFormulaMenuCommand
def _check_extension(self, extension): """Checks for required methods in extension objects.""" try: LOG.debug('Ext name: %s', extension.get_name()) LOG.debug('Ext alias: %s', extension.get_alias()) LOG.debug('Ext description: %s', extension.get_description()) LOG.debug('Ext updated: %s', extension.get_updated()) except __HOLE__: LOG.exception(_LE("Exception loading extension")) return False return isinstance(extension, ExtensionDescriptor)
AttributeError
dataset/ETHPy150Open openstack/neutron/neutron/api/extensions.py/ExtensionManager._check_extension
def get_plugin_supported_extension_aliases(self, plugin): """Return extension aliases supported by a given plugin""" aliases = set() # we also check all classes that the plugins inherit to see if they # directly provide support for an extension for item in [plugin] + plugin.__class__.mro(): try: aliases |= set( getattr(item, "supported_extension_aliases", [])) except __HOLE__: # we land here if a class has a @property decorator for # supported extension aliases. They only work on objects. pass return aliases
TypeError
dataset/ETHPy150Open openstack/neutron/neutron/api/extensions.py/PluginAwareExtensionManager.get_plugin_supported_extension_aliases
def get_extensions_path(service_plugins=None): paths = collections.OrderedDict() # Add Neutron core extensions paths[neutron.extensions.__path__[0]] = 1 if service_plugins: # Add Neutron *-aas extensions for plugin in service_plugins.values(): neutron_mod = provider_configuration.NeutronModule( plugin.__module__.split('.')[0]) try: paths[neutron_mod.module().extensions.__path__[0]] = 1 except __HOLE__: # Occurs normally if module has no extensions sub-module pass # Add external/other plugins extensions if cfg.CONF.api_extensions_path: for path in cfg.CONF.api_extensions_path.split(":"): paths[path] = 1 LOG.debug("get_extension_paths = %s", paths) # Re-build the extension string path = ':'.join(paths) return path
AttributeError
dataset/ETHPy150Open openstack/neutron/neutron/api/extensions.py/get_extensions_path
def authenticate(self, username, password): user = None # try Debian PAM module (PyPAM) try: auth = PAM.pam() # pam callback def pam_conv(auth, query_list, userData): response = [] for i in range(len(query_list)): query, type = query_list[i] if type == PAM.PAM_PROMPT_ECHO_ON: val = raw_input(query) response.append((val, 0)) elif type == PAM.PAM_PROMPT_ECHO_OFF: response.append((password, 0)) elif type == PAM.PAM_PROMPT_ERROR_MSG or type == PAM.PAM_PROMPT_TEXT_INFO: response.append(('', 0)) else: return None return response auth.start(self.PAM_SERVICE) auth.set_item(PAM.PAM_USER, username) auth.set_item(PAM.PAM_CONV, pam_conv) try: auth.authenticate() auth.acct_mgmt() user = { 'username': username, 'name': username, 'su': 'Yes' } except PAM.error: pass except __HOLE__: p = pam_m if p.authenticate(username, password, service=self.PAM_SERVICE): user = { 'username': username, 'name': username, 'su': 'Yes' } return user
NameError
dataset/ETHPy150Open claudyus/LXC-Web-Panel/lwp/authenticators/pam.py/pam.authenticate
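A sketch of the fallback above: if the optional 'import PAM' failed elsewhere, merely referencing the name PAM raises NameError, which triggers the pam_m path.

try:
    PAM                          # defined only when 'import PAM' succeeded
except NameError:
    print('PyPAM unavailable; falling back to the pam module')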
def updateLabelWidget(self): try: self.myInteractionProgressBar.setVisible(False) self.parent.labelWidget.repaint() except __HOLE__: pass
IndexError
dataset/ETHPy150Open ilastik/ilastik-0.5/ilastik/modules/classification/gui/guiThreads.py/ClassificationInteractive.updateLabelWidget
def send(self, response_decoder=None): """ Creates and sends a request to the OAuth server, decodes the response and returns the resulting token object. response_decoder - A custom callable can be supplied to override the default method of extracting AccessToken parameters from the response. This is necessary for server implementations which do not conform to the more recent OAuth2 specification (ex: Facebook). By default, this will assume the response is encoded using JSON. The callable should return a dictionary with keys and values as follows: access_token - The access token token_type - The token type expires_in - The number of seconds in which the token expires refresh_token - The refresh token scope - The permission scope (as a space delimited string) """ decoder = loads if response_decoder is not None and callable(response_decoder): decoder = response_decoder request = self.build_url_request() try: f = urlopen(request) except __HOLE__ as e: try: error_resp = e.read() error_data = loads(error_resp) except Exception: raise AccessTokenResponseError('Access request returned an error, but the response could not be read: %s ' % error_resp) if error_data.get('error') is None: raise AccessTokenResponseError('Access request returned an error, but did not include an error code') raise AccessTokenRequestError(error_data['error'], error_data.get('error_description'), error_data.get('error_uri')) token_data = decoder(f.read()) return self._create_access_token(token_data)
HTTPError
dataset/ETHPy150Open ryanhorn/tyoiOAuth2/tyoi/oauth2/__init__.py/AccessTokenRequest.send
def try_utf8_decode(value): """Try to decode an object. :param value: :return: """ if not is_string(value): return value elif PYTHON3 and not isinstance(value, bytes): return value elif not PYTHON3 and not isinstance(value, unicode): return value try: return value.decode('utf-8') except (UnicodeEncodeError, __HOLE__): pass return value
AttributeError
dataset/ETHPy150Open eandersson/amqpstorm/amqpstorm/compatibility.py/try_utf8_decode
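A sketch of the AttributeError branch above: values without a .decode method (already-decoded text on Python 3, numbers) are returned unchanged.

for value in (b'caf\xc3\xa9', u'already text', 42):
    try:
        print(value.decode('utf-8'))
    except AttributeError:
        print(value)             # not a byte string; passed through as-is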
def joined(self, a, b): """ Returns True if a and b are members of the same set. """ mapping = self._mapping try: return mapping[a] is mapping[b] except __HOLE__: return False
KeyError
dataset/ETHPy150Open tanghaibao/jcvi/utils/grouper.py/Grouper.joined
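A sketch of the identity test above with a hand-built mapping: members of one group share the same set object, so 'is' answers the membership question.

mapping = {}
mapping['a'] = mapping['b'] = set(['a', 'b'])  # one shared set object
mapping['c'] = set(['c'])

def joined(a, b):
    try:
        return mapping[a] is mapping[b]
    except KeyError:
        return False             # unknown elements belong to no group

print(joined('a', 'b'), joined('a', 'c'), joined('a', 'zzz'))
# -> True False False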
def match(self, response): try: self._actual = response.json except __HOLE__: self._actual = json.loads(response.data) return self._actual == self._expected
AttributeError
dataset/ETHPy150Open obmarg/flask-should-dsl/flask_should_dsl/matchers.py/JsonMatcher.match
def __call__(self, *pargs): if len(pargs) == 1: # One argument - this is either just the header name, # or the full header text expected = pargs[0].split(':') elif len(pargs) == 2: # Two arguments - should be header name & header value expected = pargs else: raise Exception('has_header accepts one or two arguments') self._expected_name = expected[0] try: self._expected_value = expected[1].strip() self._check_value = True except __HOLE__: self._check_value = False return self
IndexError
dataset/ETHPy150Open obmarg/flask-should-dsl/flask_should_dsl/matchers.py/HeaderMatcher.__call__
def global_env(): """Gets the global Elo environment.""" try: global_env.__elo__ except __HOLE__: # setup the default environment setup() return global_env.__elo__
AttributeError
dataset/ETHPy150Open sublee/elo/elo.py/global_env
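A sketch of the lazy-initialisation idiom above, with object() standing in for the real setup() call:

def global_env():
    try:
        global_env.__elo__       # exists only after the first call
    except AttributeError:
        global_env.__elo__ = object()  # one-time setup
    return global_env.__elo__

assert global_env() is global_env()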
@handle_request_errors def run(self): params = self.args.get('<params>') if params == '-': params = sys.stdin.read() body = json.loads(params) try: timeout = float(self.args.get('--timeout')) except __HOLE__: print("--timeout requires a number (e.g. --timeout=0.42)") return 1 subject, version = parse_versioned_name(self.args['<subject>']) address = self.args.get('--address') if not address: address = subject.rsplit('.', 1)[0] client = Client.from_config(self.config) def request(): trace.set_id(self.args.get('--trace-id')) return client.request(address, subject, body, timeout=timeout, version=version) N, C = int(self.args['-N']), int(self.args['-C']) if N == 1: return self._run_one_request(request) else: return self._run_many_requests(request, N, C)
ValueError
dataset/ETHPy150Open deliveryhero/lymph/lymph/cli/request.py/RequestCommand.run
def download(self): self._printStartDownloadMessage() response = requests.get(self.url, stream=True) response.raise_for_status() try: contentLength = int(response.headers['content-length']) self.fileSize = contentLength except __HOLE__: # chunked transfer encoding pass with open(self.path, 'wb') as outputFile: for chunk in response.iter_content(chunk_size=self.chunkSize): self.bytesReceived += self.chunkSize self._updateDisplay() outputFile.write(chunk) self._cleanUp()
KeyError
dataset/ETHPy150Open ga4gh/server/scripts/utils.py/HttpFileDownloader.download
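A sketch of the KeyError path above: chunked responses carry no Content-Length header (a plain dict stands in for response.headers).

headers = {'transfer-encoding': 'chunked'}
try:
    total = int(headers['content-length'])
except KeyError:
    total = None                 # size unknown; progress is indeterminate
print(total)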