function | label | info
---|---|---|
def main():
"""
The entry point for the script. This script is fairly basic. Here is a
quick example of how to use it::
django_test_runner.py [path-to-app]
You must have Django on the PYTHONPATH prior to running this script. This
script basically will bootstrap a Django environment for you.
By default this script will use SQLite and an in-memory database. If you
are using Python 2.5 it will just work out of the box for you.
"""
parser = OptionParser()
parser.add_option("--DATABASE_ENGINE", dest="DATABASE_ENGINE", default="sqlite3")
parser.add_option("--DATABASE_NAME", dest="DATABASE_NAME", default="")
parser.add_option("--DATABASE_USER", dest="DATABASE_USER", default="")
parser.add_option("--DATABASE_PASSWORD", dest="DATABASE_PASSWORD", default="")
parser.add_option("--SITE_ID", dest="SITE_ID", type="int", default=1)
options, args = parser.parse_args()
# check for app in args
try:
app_path = args[0]
except __HOLE__:
print "You did not provide an app path."
raise SystemExit
else:
if app_path.endswith("/"):
app_path = app_path[:-1]
parent_dir, app_name = os.path.split(app_path)
sys.path.insert(0, parent_dir)
settings.configure(**{
"DATABASE_ENGINE": options.DATABASE_ENGINE,
"DATABASE_NAME": options.DATABASE_NAME,
"DATABASE_USER": options.DATABASE_USER,
"DATABASE_PASSWORD": options.DATABASE_PASSWORD,
"SITE_ID": options.SITE_ID,
"ROOT_URLCONF": "",
"TEMPLATE_LOADERS": (
"django.template.loaders.filesystem.load_template_source",
"django.template.loaders.app_directories.load_template_source",
),
"TEMPLATE_DIRS": (
os.path.join(os.path.dirname(__file__), "templates"),
),
"INSTALLED_APPS": (
# HACK: the admin app should *not* be required. Need to spend some
# time looking into this. Django #8523 has a patch for this issue,
# but was wrongly attached to that ticket. It should have its own
# ticket.
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
app_name,
),
})
call_command("test") | IndexError | dataset/ETHPy150Open ericholscher/django-test-utils/test_utils/bin/django_test_runner.py/main |
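The masked exception here is `IndexError`: `OptionParser.parse_args` returns a (possibly empty) list of positional arguments, and indexing that empty list is what fails when no app path is supplied. A minimal, self-contained sketch of the pattern (not part of the original script):

```python
# Sketch only: simulate running the script with no positional arguments.
from optparse import OptionParser

parser = OptionParser()
parser.add_option("--SITE_ID", dest="SITE_ID", type="int", default=1)
options, args = parser.parse_args([])  # empty argv -> args == []

try:
    app_path = args[0]
except IndexError:
    print("You did not provide an app path.")
```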
def get_docker_client():
"""
Try to fire up boot2docker and set any environmental variables
"""
# For Mac
try:
# Get boot2docker info (will fail if not Mac)
process = ['boot2docker', 'info']
p = subprocess.Popen(process, stdout=PIPE)
boot2docker_info = json.loads(p.communicate()[0])
# Defaults
docker_host = ''
docker_cert_path = ''
docker_tls_verify = ''
# Start the boot2docker VM if it is not already running
if boot2docker_info['State'] != "running":
print('Starting Boot2Docker VM:')
# Start up the Docker VM
process = ['boot2docker', 'start']
subprocess.call(process)
if ('DOCKER_HOST' not in os.environ) or ('DOCKER_CERT_PATH' not in os.environ) or ('DOCKER_TLS_VERIFY' not in os.environ):
# Get environmental variable values
process = ['boot2docker', 'shellinit']
p = subprocess.Popen(process, stdout=PIPE)
boot2docker_envs = p.communicate()[0].split()
for env in boot2docker_envs:
if 'DOCKER_HOST' in env:
docker_host = env.split('=')[1]
elif 'DOCKER_CERT_PATH' in env:
docker_cert_path = env.split('=')[1]
elif 'DOCKER_TLS_VERIFY' in env:
docker_tls_verify = env.split('=')[1]
# Set environmental variables
os.environ['DOCKER_TLS_VERIFY'] = docker_tls_verify
os.environ['DOCKER_HOST'] = docker_host
os.environ['DOCKER_CERT_PATH'] = docker_cert_path
else:
# Handle case when boot2docker is already running
docker_host = os.environ['DOCKER_HOST']
# Get the arguments from the environment
client_kwargs = kwargs_from_env(assert_hostname=False)
client_kwargs['version'] = MINIMUM_API_VERSION
# Find the right version of the API by creating a DockerClient with the minimum working version
# Then test to see if the Docker daemon is running a later version than the minimum
# See: https://github.com/docker/docker-py/issues/439
version_client = DockerClient(**client_kwargs)
client_kwargs['version'] = get_api_version(MAX_CLIENT_DOCKER_API_VERSION, version_client.version()['ApiVersion'])
# Create Real Docker client
docker_client = DockerClient(**client_kwargs)
# Derive the host address from a string formatted as "tcp://<host>:<port>"
docker_client.host = docker_host.split(':')[1].strip('//')
return docker_client
# For Linux
except __HOLE__:
# Find the right version of the API by creating a DockerClient with the minimum working version
# Then test to see if the Docker daemon is running a later version than the minimum
# See: https://github.com/docker/docker-py/issues/439
version_client = DockerClient(base_url='unix://var/run/docker.sock', version=MINIMUM_API_VERSION)
version = get_api_version(MAX_CLIENT_DOCKER_API_VERSION, version_client.version()['ApiVersion'])
docker_client = DockerClient(base_url='unix://var/run/docker.sock', version=version)
docker_client.host = DEFAULT_DOCKER_HOST
return docker_client
except:
raise | OSError | dataset/ETHPy150Open tethysplatform/tethys/tethys_apps/cli/docker_commands.py/get_docker_client |
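The Linux fallback works because `subprocess.Popen` raises `OSError` (the masked exception) when the `boot2docker` binary is not on the `PATH`. The environment-variable parsing can be exercised in isolation; the sample output below is assumed, mirroring the `export NAME=value` lines `boot2docker shellinit` prints:

```python
# Sketch only: parse hypothetical `boot2docker shellinit` output.
sample = """export DOCKER_HOST=tcp://192.168.59.103:2376
export DOCKER_CERT_PATH=/Users/me/.boot2docker/certs/boot2docker-vm
export DOCKER_TLS_VERIFY=1"""

docker_host = docker_cert_path = docker_tls_verify = ''
for env in sample.split():          # whitespace split yields NAME=value tokens
    if 'DOCKER_HOST' in env:
        docker_host = env.split('=')[1]
    elif 'DOCKER_CERT_PATH' in env:
        docker_cert_path = env.split('=')[1]
    elif 'DOCKER_TLS_VERIFY' in env:
        docker_tls_verify = env.split('=')[1]

# Same host derivation as above: "tcp://<host>:<port>" -> "<host>"
print(docker_host.split(':')[1].strip('//'))  # 192.168.59.103
```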
def stop_boot2docker():
"""
Shut down boot2docker if applicable
"""
try:
process = ['boot2docker', 'stop']
subprocess.call(process)
print('Boot2Docker VM Stopped')
except __HOLE__:
pass
except:
raise | OSError | dataset/ETHPy150Open tethysplatform/tethys/tethys_apps/cli/docker_commands.py/stop_boot2docker |
def start_docker_containers(docker_client, container=None):
"""
Start Docker containers
"""
# Perform check
container_check(docker_client, container=container)
# Get container dicts
container_status = get_docker_container_status(docker_client)
# Start PostGIS
try:
if not container_status[POSTGIS_CONTAINER] and (not container or container == POSTGIS_INPUT):
print('Starting PostGIS container...')
docker_client.start(container=POSTGIS_CONTAINER,
restart_policy='always',
port_bindings={5432: DEFAULT_POSTGIS_PORT})
elif not container or container == POSTGIS_INPUT:
print('PostGIS container already running...')
except KeyError:
if not container or container == POSTGIS_INPUT:
print('PostGIS container not installed...')
except:
raise
try:
if not container_status[GEOSERVER_CONTAINER] and (not container or container == GEOSERVER_INPUT):
# Start GeoServer
print('Starting GeoServer container...')
docker_client.start(container=GEOSERVER_CONTAINER,
restart_policy='always',
port_bindings={8080: DEFAULT_GEOSERVER_PORT})
elif not container or container == GEOSERVER_INPUT:
print('GeoServer container already running...')
except __HOLE__:
if not container or container == GEOSERVER_INPUT:
print('GeoServer container not installed...')
except:
raise
try:
if not container_status[N52WPS_CONTAINER] and (not container or container == N52WPS_INPUT):
# Start 52 North WPS
print('Starting 52 North WPS container...')
docker_client.start(container=N52WPS_CONTAINER,
restart_policy='always',
port_bindings={8080: DEFAULT_N52WPS_PORT})
elif not container or container == N52WPS_INPUT:
print('52 North WPS container already running...')
except KeyError:
if not container or container == N52WPS_INPUT:
print('52 North WPS container not installed...')
except:
raise | KeyError | dataset/ETHPy150Open tethysplatform/tethys/tethys_apps/cli/docker_commands.py/start_docker_containers |
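The masked exception is `KeyError`: `get_docker_container_status` presumably returns a dict keyed only by installed container names, so looking up a container that was never installed fails at the dict access rather than returning `False`. A stand-alone sketch (the container name is hypothetical):

```python
# Sketch only: a status dict with no PostGIS entry raises KeyError.
POSTGIS_CONTAINER = 'tethys_postgis'   # hypothetical name
container_status = {}                  # simulate: PostGIS never installed

try:
    if not container_status[POSTGIS_CONTAINER]:
        print('Starting PostGIS container...')
except KeyError:
    print('PostGIS container not installed...')
```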
def stop_docker_containers(docker_client, silent=False, container=None):
"""
Stop Docker containers
"""
# Perform check
container_check(docker_client, container=container)
# Get container dicts
container_status = get_docker_container_status(docker_client)
# Stop PostGIS
try:
if container_status[POSTGIS_CONTAINER] and (not container or container == POSTGIS_INPUT):
if not silent:
print('Stopping PostGIS container...')
docker_client.stop(container=POSTGIS_CONTAINER)
elif not silent and (not container or container == POSTGIS_INPUT):
print('PostGIS container already stopped.')
except KeyError:
if not container or container == POSTGIS_INPUT:
print('PostGIS container not installed...')
except:
raise
# Stop GeoServer
try:
if container_status[GEOSERVER_CONTAINER] and (not container or container == GEOSERVER_INPUT):
if not silent:
print('Stopping GeoServer container...')
docker_client.stop(container=GEOSERVER_CONTAINER)
elif not silent and (not container or container == GEOSERVER_INPUT):
print('GeoServer container already stopped.')
except __HOLE__:
if not container or container == GEOSERVER_INPUT:
print('GeoServer container not installed...')
except:
raise
# Stop 52 North WPS
try:
if container_status[N52WPS_CONTAINER] and (not container or container == N52WPS_INPUT):
if not silent:
print('Stopping 52 North WPS container...')
docker_client.stop(container=N52WPS_CONTAINER)
elif not silent and (not container or container == N52WPS_INPUT):
print('52 North WPS container already stopped.')
except KeyError:
if not container or container == N52WPS_INPUT:
print('52 North WPS container not installed...')
except:
raise | KeyError | dataset/ETHPy150Open tethysplatform/tethys/tethys_apps/cli/docker_commands.py/stop_docker_containers |
def docker_ip():
"""
Returns the hosts and ports of the Docker containers.
"""
# Retrieve a Docker client
docker_client = get_docker_client()
# Containers
containers = get_docker_container_dicts(docker_client)
container_status = get_docker_container_status(docker_client)
docker_host = docker_client.host
# PostGIS
try:
if container_status[POSTGIS_CONTAINER]:
postgis_container = containers[POSTGIS_CONTAINER]
postgis_port = postgis_container['Ports'][0]['PublicPort']
print('\nPostGIS/Database:')
print(' Host: {0}'.format(docker_host))
print(' Port: {0}'.format(postgis_port))
else:
print('PostGIS/Database: Not Running.')
except KeyError:
# If key error is raised, it is likely not installed.
print('PostGIS/Database: Not Installed.')
except:
raise
# GeoServer
try:
if container_status[GEOSERVER_CONTAINER]:
geoserver_container = containers[GEOSERVER_CONTAINER]
geoserver_port = geoserver_container['Ports'][0]['PublicPort']
print('\nGeoServer:')
print(' Host: {0}'.format(docker_host))
print(' Port: {0}'.format(geoserver_port))
print(' Endpoint: http://{0}:{1}/geoserver/rest'.format(docker_host, geoserver_port))
else:
print('GeoServer: Not Running.')
except __HOLE__:
# If key error is raised, it is likely not installed.
print('GeoServer: Not Installed.')
except:
raise
# 52 North WPS
try:
if container_status[N52WPS_CONTAINER]:
n52wps_container = containers[N52WPS_CONTAINER]
n52wps_port = n52wps_container['Ports'][0]['PublicPort']
print('\n52 North WPS:')
print(' Host: {0}'.format(docker_host))
print(' Port: {0}'.format(n52wps_port))
print(' Endpoint: http://{0}:{1}/wps/WebProcessingService\n'.format(docker_host, n52wps_port))
else:
print('52 North WPS: Not Running.')
except KeyError:
# If key error is raised, it is likely not installed.
print('52 North WPS: Not Installed.')
except:
raise | KeyError | dataset/ETHPy150Open tethysplatform/tethys/tethys_apps/cli/docker_commands.py/docker_ip |
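The container dicts indexed here follow the shape returned by docker-py's container listing, where each container carries a `Ports` list of port-mapping dicts. A sketch with assumed names and numbers:

```python
# Sketch only: the dict shape that containers[...]['Ports'][0]['PublicPort']
# expects (names and numbers are hypothetical).
containers = {
    'tethys_geoserver': {
        'Ports': [{'PrivatePort': 8080, 'PublicPort': 8181, 'Type': 'tcp'}],
    },
}
docker_host = '192.168.59.103'
geoserver_port = containers['tethys_geoserver']['Ports'][0]['PublicPort']
print('Endpoint: http://{0}:{1}/geoserver/rest'.format(docker_host, geoserver_port))
```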
def init_from_registered_applications(self, request):
created_items = []
for backend in CostTrackingRegister.get_registered_backends():
try:
items = backend.get_default_price_list_items()
except __HOLE__:
continue
with transaction.atomic():
for item in items:
item, created = models.DefaultPriceListItem.objects.update_or_create(
resource_content_type=item.resource_content_type,
item_type=item.item_type,
key=item.key,
defaults={
'value': item.value,
'name': '{}: {}'.format(item.item_type, item.key),
'metadata': item.metadata,
'units': item.units
}
)
if created:
created_items.append(item)
if created_items:
message = ungettext(
'Price item was created: {}'.format(created_items[0].name),
'Price items were created: {}'.format(', '.join(item.name for item in created_items)),
len(created_items)
)
self.message_user(request, message)
else:
self.message_user(request, "Price items for all registered applications have been updated")
return redirect(reverse('admin:cost_tracking_defaultpricelistitem_changelist')) | NotImplementedError | dataset/ETHPy150Open opennode/nodeconductor/nodeconductor/cost_tracking/admin.py/DefaultPriceListItemAdmin.init_from_registered_applications |
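The `ungettext` call picks a singular or plural message based on the item count. A pure-Python stand-in (not Django's implementation) showing just the selection logic, with hypothetical item names:

```python
# Sketch only: ungettext chooses between two prebuilt messages by count.
def ungettext(singular, plural, count):   # stand-in for django.utils.translation
    return singular if count == 1 else plural

created_names = ['flavor: small', 'storage: 1 GB']   # hypothetical items
message = ungettext(
    'Price item was created: {}'.format(created_names[0]),
    'Price items were created: {}'.format(', '.join(created_names)),
    len(created_names),
)
print(message)  # Price items were created: flavor: small, storage: 1 GB
```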
def _get_patch(self, request, *args, **kwargs):
try:
review_request = \
resources.review_request.get_object(request, *args, **kwargs)
diffset = self.get_object(request, *args, **kwargs)
except __HOLE__:
return DOES_NOT_EXIST
tool = review_request.repository.get_scmtool()
data = tool.get_parser('').raw_diff(diffset)
resp = HttpResponse(data, content_type='text/x-patch')
if diffset.name == 'diff':
filename = 'bug%s.patch' % \
review_request.bugs_closed.replace(',', '_')
else:
filename = diffset.name
resp['Content-Disposition'] = 'inline; filename=%s' % filename
set_last_modified(resp, diffset.timestamp)
return resp | ObjectDoesNotExist | dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/diff.py/DiffResource._get_patch |
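The filename rule is easy to check in isolation: a diffset literally named `diff` gets a name derived from the bugs the review request closes, while any other diffset keeps its own name. With assumed values:

```python
# Sketch only: the patch filename derivation above.
bugs_closed = '1234,5678'                       # hypothetical bug list
filename = 'bug%s.patch' % bugs_closed.replace(',', '_')
print(filename)  # bug1234_5678.patch
```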
@webapi_login_required
@webapi_check_local_site
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(
allow_unknown=True
)
def update(self, request, extra_fields={}, *args, **kwargs):
"""Updates a diff.
This is used solely for updating extra data on a diff. The contents
of a diff cannot be modified.
Extra data can be stored on the diff for later lookup by passing
``extra_data.key_name=value``. The ``key_name`` and ``value`` can
be any valid strings. Passing a blank ``value`` will remove the key.
The ``extra_data.`` prefix is required.
"""
try:
review_request = \
resources.review_request.get_object(request, *args, **kwargs)
diffset = self.get_object(request, *args, **kwargs)
except __HOLE__:
return DOES_NOT_EXIST
if not review_request.is_mutable_by(request.user):
return self.get_no_access_error(request)
if extra_fields:
self.import_extra_data(diffset, diffset.extra_data, extra_fields)
diffset.save(update_fields=['extra_data'])
return 200, {
self.item_result_key: diffset,
} | ObjectDoesNotExist | dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/webapi/resources/diff.py/DiffResource.update |
def wal_archive(self, wal_path, concurrency=1):
"""
Uploads a WAL file to S3 or Windows Azure Blob Service
This code is intended to typically be called from Postgres's
archive_command feature.
"""
# Upload the segment expressly indicated. It's special
# relative to other uploads when parallel wal-push is enabled,
# in that it's not desirable to tweak its .ready/.done files
# in archive_status.
xlog_dir = os.path.dirname(wal_path)
segment = WalSegment(wal_path, explicit=True)
uploader = WalUploader(self.layout, self.creds, self.gpg_key_id)
group = WalTransferGroup(uploader)
group.start(segment)
# Upload any additional wal segments up to the specified
# concurrency by scanning the Postgres archive_status
# directory.
started = 1
seg_stream = WalSegment.from_ready_archive_status(xlog_dir)
while started < concurrency:
try:
other_segment = seg_stream.next()
except __HOLE__:
break
if other_segment.path != wal_path:
group.start(other_segment)
started += 1
try:
# Wait for uploads to finish.
group.join()
except EnvironmentError as e:
if e.errno == errno.ENOENT:
print e
raise UserException(
msg='could not find file for wal-push',
detail=('The operating system reported: {0} {1}'
.format(e.strerror, repr(e.filename))))
raise | StopIteration | dataset/ETHPy150Open wal-e/wal-e/wal_e/operator/backup.py/Backup.wal_archive |
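The masked exception is `StopIteration`: calling `.next()` on an exhausted generator raises it, which is what ends the scan for extra ready WAL segments once `archive_status` runs dry. A minimal sketch:

```python
# Sketch only: an exhausted segment stream ends the parallel-push scan.
def segments():                       # stand-in for from_ready_archive_status
    yield 'wal-000000010000000000000002'

seg_stream = segments()
started, concurrency = 1, 4
while started < concurrency:
    try:
        other_segment = next(seg_stream)   # seg_stream.next() in Python 2
    except StopIteration:
        break
    started += 1
print(started)  # 2 -- one extra segment found, then the stream was exhausted
```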
def test_get_language_title(self):
"""Test get_language_title utility function"""
language_code = 'en'
self.assertEqual(get_language_title(language_code), 'English')
# Test the case where the requested language is not in settings.
# We cannot override settings, since languages in get_language_title()
# are initialised during import, so we use a fictional language code.
language_code = 'xx'
try:
self.assertEqual(get_language_title(language_code), language_code)
except __HOLE__:
self.fail(
"get_language_title() raises KeyError for missing language") | KeyError | dataset/ETHPy150Open edoburu/django-parler/parler/tests/test_utils.py/UtilTestCase.test_get_language_title |
@login_required
def callback(request):
"""
Step 2 of OAuth: fetch the token.
"""
try:
oauth_state = request.session['oauth_state']
except __HOLE__:
return HttpResponseBadRequest('Missing oauth state.')
github = OAuth2Session(settings.GITHUB_CLIENT_ID, state=oauth_state)
token = github.fetch_token(
settings.GITHUB_TOKEN_URL,
client_secret=settings.GITHUB_CLIENT_SECRET,
authorization_response=request.build_absolute_uri()
)
try:
OAuthToken.objects.create(user=request.user, value=token['access_token'])
except (KeyError, TypeError):
return HttpResponseBadRequest('Cannot read access_token.')
Repository.add_user_to_known_repositories(request.user)
return redirect("home") | KeyError | dataset/ETHPy150Open m-vdb/github-buildservice-boilerplate/buildservice/views/oauth.py/callback |
def load(self, fname, iszip=True):
if sys.version_info[0] == 3:
fname = fname + '.3'
if not iszip:
d = marshal.load(open(fname, 'rb'))
else:
try:
f = gzip.open(fname, 'rb')
d = marshal.loads(f.read())
except __HOLE__:
f = open(fname, 'rb')
d = marshal.loads(f.read())
f.close()
for k, v in d.items():
if isinstance(self.__dict__[k], set):
self.__dict__[k] = set(v)
elif hasattr(self.__dict__[k], '__dict__'):
self.__dict__[k].__dict__ = v
else:
self.__dict__[k] = v | IOError | dataset/ETHPy150Open isnowfy/snownlp/snownlp/utils/tnt.py/TnT.load |
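The fallback hinges on `gzip.open` failing only at read time: opening a plain `marshal` file succeeds, but decompressing it raises `IOError` (an alias of `OSError` in Python 3), so the loader retries with a normal `open`. A runnable sketch with a hypothetical file name:

```python
# Sketch only: reading a non-gzip file through gzip raises IOError.
import gzip
import marshal

with open('model.marshal', 'wb') as out:      # hypothetical, not gzipped
    out.write(marshal.dumps({'total': 42}))

try:
    f = gzip.open('model.marshal', 'rb')
    d = marshal.loads(f.read())
except IOError:
    f = open('model.marshal', 'rb')
    d = marshal.loads(f.read())
f.close()
print(d)  # {'total': 42}
```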
def test_correct_json_validation(self):
"""Tests that when a JSON snippet is incorrect, an error must be raised.
"""
try:
validate_json('{"id":1,"name":"foo","interest":["django","django ERP"]}')
self.assertTrue(True)
except __HOLE__:
self.assertFalse(True) | ValidationError | dataset/ETHPy150Open django-erp/django-erp/djangoerp/core/tests.py/JSONValidationCase.test_correct_json_validation |
def test_incorrect_json_validation(self):
"""Tests that when a JSON snippet is incorrect, an error must be raised.
"""
try:
# The snippet is incorrect due to the doubled closing square bracket.
validate_json('{"id":1,"name":"foo","interest":["django","django ERP"]]}')
self.assertFalse(True)
except __HOLE__:
self.assertTrue(True) | ValidationError | dataset/ETHPy150Open django-erp/django-erp/djangoerp/core/tests.py/JSONValidationCase.test_incorrect_json_validation |
def getNumLines(self, fileName):
try:
reader = csv.reader(open(fileName, "rU"))
except __HOLE__:
raise
numLines = 0
for row in reader:
numLines = numLines + 1
return numLines | IOError | dataset/ETHPy150Open charanpald/APGL/apgl/io/CsvReader.py/CsvReader.getNumLines |
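A quick check of the counting loop with a hypothetical three-row CSV (plain `'r'` mode is used below instead of the legacy `'rU'`, which newer Pythons reject):

```python
# Sketch only: count CSV rows the same way getNumLines does.
import csv

with open('data.csv', 'w') as out:      # hypothetical fixture
    out.write('a,b\n1,2\n3,4\n')

reader = csv.reader(open('data.csv', 'r'))
numLines = 0
for row in reader:
    numLines = numLines + 1
print(numLines)  # 3
```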
def test_using_keyword(self):
self.assert_count(self.model, 0)
self.assert_count(self.model, 0, using='legacy')
self.assert_create(self.model, using='legacy', **self.kwargs)
self.assert_count(self.model, 0)
self.assert_not_count(self.model, 0, using='legacy')
self.assert_count(self.model, 1, using='legacy')
for key, value in self.kwargs.items():
self.assert_not_read(self.model, **{key: value})
for key, value in self.kwargs.items():
self.assert_read(self.model, using='legacy', **{key: value})
try:
self.assert_update(self.model, **self.sgrawk)
except __HOLE__:
pass
else:
assert False, 'Any %r model should be exist in default ' \
'database.' % self.model
self.assert_update(self.model, using='legacy', **self.sgrawk)
self.assert_not_read(self.model, **self.kwargs)
self.assert_not_read(self.model, using='legacy', **self.kwargs)
self.assert_delete(self.model)
self.assert_delete(self.model, using='legacy')
self.assert_count(self.model, 0)
self.assert_count(self.model, 0, using='legacy') | AssertionError | dataset/ETHPy150Open playpauseandstop/tddspry/tests/testproject/multidb/tests/test_models.py/TestModels.test_using_keyword |
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.realpath(__file__)
except __HOLE__:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full} | NameError | dataset/ETHPy150Open bokeh/bokeh/bokeh/_version.py/versions_from_vcs |
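After `git describe` succeeds, the tag prefix is stripped and a `-dirty` suffix is propagated onto the full SHA. With hypothetical command output:

```python
# Sketch only: tag-prefix stripping and dirty propagation from above.
tag_prefix = 'myproject-'               # hypothetical prefix
stdout = 'myproject-0.4.1-dirty'        # hypothetical `git describe` output
full = 'deadbeefcafe'                   # hypothetical `git rev-parse HEAD`

tag = stdout[len(tag_prefix):]
if tag.endswith('-dirty'):
    full += '-dirty'
print({'version': tag, 'full': full})
# {'version': '0.4.1-dirty', 'full': 'deadbeefcafe-dirty'}
```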
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.realpath(__file__)
except __HOLE__:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.realpath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""} | NameError | dataset/ETHPy150Open bokeh/bokeh/bokeh/_version.py/versions_from_parentdir |
def main(argv):
logging.config.fileConfig(open("logging.ini"))
path = argv[0]
files_start = getfiles(path)
time.sleep(0.1)
files_end = getfiles(path)
# Find the files which haven't changed size.
unchanged = {}
for filename, start_size in files_start.iteritems():
try:
end_size = files_end[filename]
except __HOLE__:
continue
if start_size == end_size:
unchanged[filename] = end_size
logging.info("Found %s files to possibly upload (%s changing)",
len(unchanged), len(files_end)-len(unchanged))
# Connect to youtube's SFTP server
sftp = pexpect.spawn("sftp -oIdentityFile=%s %s@%s" % (
PRIVATE_KEY, USERNAME.lower(), HOST))
sftp.expect([PROMPT, SURE])
logging.info('First line for start %r %r', sftp.before, sftp.after)
if sftp.after[:5] == SURE[:5]:
logging.info('Never seen %s before, accepting SSH host key.' % HOST)
sftp.sendline('yes')
sftp.expect(PROMPT)
# open a SFTP channel
sftp_dirs = []
for dirname, size in sftp_listdir(sftp, '/').items():
sftp_dirs.append(dirname)
for filename, size in unchanged.iteritems():
# Video files will be gigabytes in size
#if size < 1e9:
# print filename, "File too small", size
# continue
basename = filename[:filename.rfind('.')]
statusfile = "status-%s.xml" % basename
successfile = basename+'.uploaded'
# Has the file already been uploaded?
if os.path.exists(path+'/'+successfile):
logging.info("%s is already uploaded and processed.", filename)
continue
# Create the directory
if basename not in sftp_dirs:
sftp_mkdir(sftp, basename)
files = {}
for filename, size in sftp_listdir(sftp, basename).items():
files[filename] = size
# Check if the we have a completion status report
if statusfile in files:
logging.info("%s already uploaded and processed.", filename)
if statusfile not in unchanged:
sftp_get(sftp, basename+'/'+statusfile, path+'/'+statusfile, progress)
continue
if "delivery.complete" in files:
logging.info("%s already uploaded and awaiting processing.", filename)
continue
if os.path.exists(path+'/'+statusfile):
logging.info("%s is already uploaded and waiting status response.", filename)
continue
size = os.stat(path+'/'+filename).st_size
# Upload the actual video
upload = False
if filename not in files:
logging.info("%s does not exist on server.", filename)
upload = True
elif files[filename] != size:
logging.info(
"%s filesize differs remote: %s, local: %s.", filename, files[filename], size)
upload = True
success = False
# Create the metadata
if basename+".xml" not in files:
datestr = basename[basename.rfind('.')+1:]
date = time.strptime(datestr, "%Y%m%d-%H%M%S")
xmlfilename = basename+".xml"
info = guess_event(date)
info['date'] = datestr
info['humandate'] = time.strftime('%A, %d %B %Y', date)
info['filename'] = filename
info['username'] = USERNAME
info['password'] = PASSWORD
logging.info("Guessing the event is %s", info['shortname'])
data = """\
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0"
xmlns:media="http://search.yahoo.com/mrss"
xmlns:yt="http://www.youtube.com/schemas/yt/0.2">
<channel>
<yt:notification_email>[email protected]</yt:notification_email>
<yt:account>
<yt:username>%(username)s</yt:username>
<yt:password>%(password)s</yt:password>
</yt:account>
<yt:owner_name>%(username)s</yt:owner_name>
<item>
<yt:action>Insert</yt:action>
<media:title>%(shortname)s - %(date)s</media:title>
<media:content url="file://%(filename)s" >
<media:description type="plain">Talk at %(name)s given on %(humandate)s</media:description>
<media:keywords>open source, google, sydney, %(tags)s</media:keywords>
<media:category>Science &amp; Technology</media:category>
<media:rating scheme="urn:simple">nonadult</media:rating>
</media:content>
<yt:language>en</yt:language>
<yt:date_recorded>2005-08-01</yt:date_recorded>
<yt:location>
<yt:country>AU</yt:country>
<yt:zip_code>NSW 2009</yt:zip_code>
<yt:location_text>Pyrmont</yt:location_text>
</yt:location>
<yt:start_time>2007-07-07T07:07:07</yt:start_time> <!-- A date
in the past -->
<yt:community>
<yt:allow_comments>Always</yt:allow_comments>
<yt:allow_responses>Never</yt:allow_responses>
<yt:allow_ratings>true</yt:allow_ratings>
<yt:allow_embedding>true</yt:allow_embedding>
</yt:community>
</item>
</channel>
</rss>
""" % info
f = open(path+"/"+xmlfilename, "w")
f.write(data)
f.close()
logging.debug("XML Metatdata:\n%s", data)
sftp_write(sftp, basename+'/'+xmlfilename, data)
logging.info("%s uploaded metadata", xmlfilename)
if upload:
logging.info("%s uploading.", filename)
sftp_put(sftp, path+'/'+filename, basename+'/'+filename, progress)
logging.info("%s uploaded.", filename)
success = True
else:
success = True
if success:
deliveryname = "delivery.complete"
if deliveryname not in files:
logging.info("%s uploading completion file", deliveryname)
sftp_write(sftp, basename+'/'+deliveryname, '')
logging.info("%s uploading completion file", deliveryname)
f = open(path+'/'+successfile, 'w')
f.write(str(time.time()))
f.write('\n')
f.close()
logging.info('Upload done.') | KeyError | dataset/ETHPy150Open timvideos/streaming-system/tools/youtube/putvideo.py/main |
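The recording date is recovered from the file's basename, everything after the last dot, using a fixed `strptime` pattern. A sketch with a hypothetical basename:

```python
# Sketch only: the basename/date parsing used above.
import time

basename = 'talk.20140101-103000'       # hypothetical recording name
datestr = basename[basename.rfind('.') + 1:]
date = time.strptime(datestr, "%Y%m%d-%H%M%S")
print(time.strftime('%A, %d %B %Y', date))  # Wednesday, 01 January 2014
```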
def evaluate(node, sphinxContext, value=False, fallback=None, hsBoundFact=False):
if isinstance(node, astNode):
if fallback is None:
result = evaluator[node.__class__.__name__](node, sphinxContext)
else:
try:
result = evaluator[node.__class__.__name__](node, sphinxContext)
except __HOLE__:
if sphinxContext.formulaOptions.traceVariableSetExpressionEvaluation:
sphinxContext.modelXbrl.info("sphinx:trace",
_("%(node)s has unbound evaluation"),
sourceFileLine=node.sourceFileLine, node=str(node))
return fallback
if sphinxContext.formulaOptions.traceVariableSetExpressionEvaluation:
sphinxContext.modelXbrl.info("sphinx:trace",
_("%(node)s evaluation: %(value)s"),
sourceFileLine=node.sourceFileLine, node=str(node), value=result)
if result is not None:
if isinstance(result, HyperspaceBinding):
if hsBoundFact: # return fact, not the value of fact
return result.yieldedFact
elif value:
return result.value
# dereference nodes to their value
if (value or hsBoundFact) and isinstance(result, astNode):
return evaluate(result, sphinxContext, value, fallback, hsBoundFact)
return result
return result
elif isinstance(node, (tuple,list)):
return [evaluate(item, sphinxContext, value, fallback, hsBoundFact)
for item in node]
elif isinstance(node, set):
return set(evaluate(item, sphinxContext, value, fallback, hsBoundFact)
for item in node)
else:
return node | StopIteration | dataset/ETHPy150Open Arelle/Arelle/arelle/plugin/sphinx/SphinxEvaluator.py/evaluate |
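Evaluation dispatches by node class name through the `evaluator` table, and a `StopIteration` escaping from an evaluator (the masked exception) marks the node as unbound and triggers the fallback. A toy version of the dispatch, with an illustrative node class:

```python
# Sketch only: dict dispatch keyed on the node's class name.
class astNumericLiteral(object):        # illustrative node type
    def __init__(self, value):
        self.value = value

evaluator = {
    'astNumericLiteral': lambda node, ctx: node.value,
}

node = astNumericLiteral(42)
print(evaluator[node.__class__.__name__](node, None))  # 42
```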
def evaluateBinaryOperation(node, sphinxContext):
leftValue = evaluate(node.leftExpr, sphinxContext, value=True, fallback=UNBOUND)
rightValue = evaluate(node.rightExpr, sphinxContext, value=True, fallback=UNBOUND)
op = node.op
if sphinxContext.formulaOptions.traceVariableExpressionEvaluation:
sphinxContext.modelXbrl.info("sphinx:trace",
_("Binary op %(op)s v1: %(leftValue)s, v2: %(rightValue)s"),
sourceFileLine=node.sourceFileLine, op=op, leftValue=leftValue, rightValue=rightValue)
if op == ":=":
if sphinxContext.ruleNode.bind == "left":
if rightValue is UNBOUND: raise StopIteration
elif sphinxContext.ruleNode.bind == "right":
if leftValue is UNBOUND: raise StopIteration
elif sphinxContext.ruleNode.bind == "either":
if leftValue is UNBOUND and rightValue is UNBOUND: raise StopIteration
else: # both or default
if leftValue is UNBOUND or rightValue is UNBOUND: raise StopIteration
return (leftValue, rightValue)
elif op in {"|+|", "|+", "+|", "+", "|-|", "|-", "-|", "-"}:
if leftValue is UNBOUND:
if op[0] == '|':
raise StopIteration
else:
leftValue = 0
if rightValue is UNBOUND:
if op[-1] == '|':
raise StopIteration
else:
rightValue = 0
else:
if leftValue is UNBOUND:
return UNBOUND
if rightValue is UNBOUND:
if op == "or" and leftValue:
return True
return UNBOUND
if op == "/" and rightValue == 0: # prevent divide by zero
return UNBOUND
try:
result = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv,
'<': operator.lt, '>': operator.gt, '<=': operator.le, '>=': operator.ge,
'==': operator.eq, '!=': operator.ne,
'and': operator.and_, 'or': operator.or_,
}[op](leftValue, rightValue)
return result
except __HOLE__:
sphinxContext.modelXbrl.error("sphinx:error",
_("Operation \"%(op)s\" not implemented for %(node)s"),
sourceFileLine=node.sourceFileLine, op=op, node=str(node))
except (TypeError, ZeroDivisionError) as err:
sphinxContext.modelXbrl.error("sphinx:error",
_("Operation \"%(op)s\" raises exception %(error)s for %(node)s"),
sourceFileLine=node.sourceFileLine, op=op, node=str(node), error=str(err))
return None | KeyError | dataset/ETHPy150Open Arelle/Arelle/arelle/plugin/sphinx/SphinxEvaluator.py/evaluateBinaryOperation |
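The masked exception is `KeyError`: the arithmetic and comparison operators dispatch through a dict of `operator` functions, so an operator missing from the table fails at lookup, which the handler reports as "not implemented". In isolation:

```python
# Sketch only: operator-table dispatch and the KeyError for a missing op.
import operator

ops = {'+': operator.add, '<=': operator.le}
print(ops['+'](2, 3))      # 5
try:
    ops['**'](2, 3)        # '**' is not in the table
except KeyError:
    print('Operation "**" not implemented')
```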
def evaluateFunctionDeclaration(node, sphinxContext, args):
overriddenVariables = {}
if isinstance(args, dict):
# args may not all be used in the function declaration, just want used ones
argDict = dict((name, value)
for name, value in args.items()
if name in node.params)
else: # purely positional args
# positional parameters named according to function prototype
if len(args) != len(node.params):
sphinxContext.modelXbrl.log("ERROR", "sphinx.functionArgumentsMismatch",
_("Function %(name)s requires %(required)s parameters but %(provided)s are provided"),
sourceFileLine=node.sourceFileLine,
name=node.name, required=len(node.params), provided=len(args))
return None
argDict = dict((paramName, args[i])
for i, paramName in enumerate(node.params))
for name, value in argDict.items():
if name in sphinxContext.localVariables:
overriddenVariables[name] = sphinxContext.localVariables[name]
sphinxContext.localVariables[name] = value
def clearFunctionArgs():
for name in argDict.keys():
del sphinxContext.localVariables[name]
sphinxContext.localVariables.update(overriddenVariables)
overriddenVariables.clear()
try:
result = evaluate(node.expr, sphinxContext)
clearFunctionArgs()
return result
except __HOLE__ as ex:
clearFunctionArgs()
raise ex # reraise exception | StopIteration | dataset/ETHPy150Open Arelle/Arelle/arelle/plugin/sphinx/SphinxEvaluator.py/evaluateFunctionDeclaration |
def evaluateAggregateFunction(node, sphinxContext, name):
# determine if evaluating args found hyperspace (first time)
args = []
iterateAbove, bindingsLen = getattr(node, "aggregationHsBindings", (None, None))
firstTime = bindingsLen is None
hsBindings = sphinxContext.hyperspaceBindings
parentAggregationNode = hsBindings.aggregationNode
parentIsValuesIteration = hsBindings.isValuesIteration
hsBindings.aggregationNode = node # block removing nested aspect bindings
hsBindings.isValuesIteration = False
prevHsBindingsLen = len(hsBindings.hyperspaceBindings)
hsBoundFact = aggreateFunctionAcceptsFactArgs[name]
arg = node.args[0]
try:
while (True): # possibly multiple bindings
# evaluate local variables
for localVar in node.localVariables:
evaluate(localVar, sphinxContext)
value = evaluate(arg, sphinxContext, value=True, hsBoundFact=hsBoundFact)
if isinstance(value, (list,set)):
for listArg in value:
if value is not UNBOUND:
args.append(evaluate(listArg, sphinxContext, value=True))
elif value is not UNBOUND:
args.append(value)
if firstTime:
if len(hsBindings.hyperspaceBindings) == prevHsBindingsLen:
# no hs bindings, just scalar
break
else: # has hs bindings, evaluate rest of them
firstTime = False
iterateAbove = prevHsBindingsLen - 1
bindingsLen = len(hsBindings.hyperspaceBindings)
node.aggregationHsBindings = (iterateAbove, bindingsLen)
hsBindings.next(iterateAbove, bindingsLen)
except StopIteration:
pass # no more bindings
hsBindings.isValuesIteration = parentIsValuesIteration
hsBindings.aggregationNode = parentAggregationNode
# remove local variables
for localVar in node.localVariables:
if localVar.name in sphinxContext.localVariables:
del sphinxContext.localVariables[localVar.name]
if sphinxContext.formulaOptions.traceVariableExpressionEvaluation:
sphinxContext.modelXbrl.info("sphinx:trace",
_("Aggregative function %(name)s arguments: %(args)s"),
sourceFileLine=node.sourceFileLine, name=name,
args=",".join(str(a) for a in args))
try:
return aggreateFunctionImplementation[name](node, sphinxContext, args)
except (__HOLE__, ZeroDivisionError) as err:
sphinxContext.modelXbrl.error("sphinx:error",
_("Function %(name)s raises exception %(error)s in %(node)s"),
sourceFileLine=node.sourceFileLine, name=name, node=str(node), error=str(err))
return None | TypeError | dataset/ETHPy150Open Arelle/Arelle/arelle/plugin/sphinx/SphinxEvaluator.py/evaluateAggregateFunction |
def evaluateTagReference(node, sphinxContext):
try:
return sphinxContext.tags[node.name]
except __HOLE__:
raise SphinxException(node,
"sphinx:tagName",
_("unassigned tag name %(name)s"),
name=node.name ) | KeyError | dataset/ETHPy150Open Arelle/Arelle/arelle/plugin/sphinx/SphinxEvaluator.py/evaluateTagReference |
def evaluateRule(node, sphinxContext):
isFormulaRule = isinstance(node, astFormulaRule)
isReportRule = isinstance(node, astReportRule)
name = (node.name or ("sphinx.report" if isReportRule else "sphinx.raise"))
nodeId = node.nodeTypeName + ' ' + name
if node.precondition:
result = evaluate(node.precondition, sphinxContext, value=True)
if sphinxContext.formulaOptions.traceVariableSetExpressionResult:
sphinxContext.modelXbrl.info("sphinx:trace",
_("%(node)s precondition evaluation: %(value)s"),
sourceFileLine=node.sourceFileLine, node=nodeId, value=result)
if not result:
return None
# nest hyperspace binding
sphinxContext.ruleNode = node
hsBindings = None
ruleIteration = 0
try:
hsBindings = HyperspaceBindings(sphinxContext)
while True:
ruleIteration += 1
sphinxContext.dynamicSeverity = None
sphinxContext.tags.clear()
sphinxContext.localVariables.clear()
if sphinxContext.formulaOptions.traceVariableSetExpressionResult:
sphinxContext.modelXbrl.info("sphinx:trace",
_("%(node)s starting iteration %(iteration)s"),
sourceFileLine=node.sourceFileLine, node=nodeId, iteration=ruleIteration)
for varAssignNode in node.variableAssignments:
evaluateVariableAssignment(varAssignNode, sphinxContext)
result = evaluate(node.expr, sphinxContext, value=True)
if result is UNBOUND:
result = None # nothing to do for this pass
elif isFormulaRule:
left, right = result
if left is UNBOUND:
difference = UNBOUND
elif right is UNBOUND:
difference = UNBOUND
else:
difference = abs(left - right)
result = difference != 0
resultTags = {"left": left, "right": right, "difference": difference}
sphinxContext.dynamicSeverity = None
if node.severity in sphinxContext.functions:
evaluateFunctionDeclaration(sphinxContext.functions[node.severity],
sphinxContext,
{"difference": difference, "left": left, "right": right})
if sphinxContext.dynamicSeverity is None or sphinxContext.dynamicSeverity == "pass": # don't process pass
sphinxContext.dynamicSeverity = None
result = False
else:
if isReportRule:
resultTags = {"value": result}
else:
resultTags = {}
if sphinxContext.formulaOptions.traceVariableSetExpressionResult:
sphinxContext.modelXbrl.info("sphinx:trace",
_("%(node)s result %(result)s %(severity)s iteration %(iteration)s"),
sourceFileLine=node.sourceFileLine, node=nodeId, iteration=ruleIteration,
result=result,
severity=(sphinxContext.dynamicSeverity or node.severity or
("info" if isReportRule else "error")))
if ((result or isReportRule) or
(sphinxContext.dynamicSeverity and sphinxContext.dynamicSeverity != "pass")):
severity = (sphinxContext.dynamicSeverity or node.severity or
("info" if isReportRule else "error"))
if isinstance(severity, astFunctionReference):
severity = severity.name
logSeverity = {"error" : "ERROR", "warning": "WARNING", "info": "INFO"}[severity]
if node.message:
sphinxContext.modelXbrl.log(logSeverity, name,
evaluateMessage(node.message, sphinxContext, resultTags, hsBindings),
sourceFileLine=[node.sourceFileLine] +
[(fact.modelDocument.uri, fact.sourceline) for fact in hsBindings.boundFacts],
severity=severity)
elif isFormulaRule:
sphinxContext.modelXbrl.log(logSeverity,
name,
_("Formula %(severity)s difference %(value)s for %(aspects)s"),
sourceFileLine=[node.sourceFileLine] +
[(fact.modelDocument.uri, fact.sourceline) for fact in hsBindings.boundFacts],
severity=severity,
value=difference,
aspects=contextView(sphinxContext))
elif isReportRule:
sphinxContext.modelXbrl.log(logSeverity,
name,
_("Report %(severity)s %(value)s for %(aspects)s"),
sourceFileLine=[node.sourceFileLine] +
[(fact.modelDocument.uri, fact.sourceline) for fact in hsBindings.boundFacts],
severity=severity,
value=result,
aspects=contextView(sphinxContext))
else:
sphinxContext.modelXbrl.log(logSeverity,
name,
_("Validation rule %(severity)s for %(aspects)s"),
sourceFileLine=[node.sourceFileLine] +
[(fact.modelDocument.uri, fact.sourceline) for fact in hsBindings.boundFacts],
severity=severity,
aspects=contextView(sphinxContext))
hsBindings.next() # raises StopIteration when done
except __HOLE__:
if sphinxContext.formulaOptions.traceVariableSetExpressionResult:
sphinxContext.modelXbrl.info("sphinx:trace",
_("%(node)s StopIteration"),
sourceFileLine=node.sourceFileLine, node=nodeId)
except SphinxException as ex:
sphinxContext.modelXbrl.log("ERROR",
ex.code,
_("Exception in %(node)s: %(exception)s"),
node=nodeId,
ruleName=name,
exception=ex.message % ex.kwargs,
sourceFileLine=[node.sourceFileLine] + ([ex.node.sourceFileLine] if ex.node is not node else []),
**ex.kwargs)
if hsBindings is not None:
hsBindings.close()
return None | StopIteration | dataset/ETHPy150Open Arelle/Arelle/arelle/plugin/sphinx/SphinxEvaluator.py/evaluateRule |
def evaluateUnaryOperation(node, sphinxContext):
if node.op == "brackets": # parentheses around an expression
return node.expr
value = evaluate(node.expr, sphinxContext, value=True, fallback=UNBOUND)
if value is UNBOUND:
return UNBOUND
try:
result = {'+': operator.pos, '-': operator.neg, 'not': operator.not_,
'values': noop,
}[node.op](value)
return result
except __HOLE__:
sphinxContext.modelXbrl.error("sphinx:error",
_("%(node)s operation %(op)s not implemented"),
modelObject=node, op=node.op)
return None | KeyError | dataset/ETHPy150Open Arelle/Arelle/arelle/plugin/sphinx/SphinxEvaluator.py/evaluateUnaryOperation |
def evaluateVariableReference(node, sphinxContext):
try:
return sphinxContext.localVariables[node.variableName]
except __HOLE__:
if node.variableName in sphinxContext.constants:
return evaluateConstant(sphinxContext.constants[node.variableName], sphinxContext)
raise SphinxException(node,
"sphinx:variableName",
_("unassigned variable name %(name)s"),
name=node.variableName) | KeyError | dataset/ETHPy150Open Arelle/Arelle/arelle/plugin/sphinx/SphinxEvaluator.py/evaluateVariableReference |
def RenderBranch(self, path, request):
"""Renders tree leafs for filesystem path."""
aff4_root = rdfvalue.RDFURN(request.REQ.get("aff4_root", self.root_path))
urn = aff4_root.Add(path)
try:
directory = aff4.FACTORY.Create(urn, "VFSDirectory", mode="r",
token=request.token)
children = list(directory.ListChildren(limit=100000))
infos = aff4.FACTORY.Stat(children, token=request.token)
info_by_urn = {}
for info in infos:
info_by_urn[info["urn"]] = info
for child_urn in children:
info = info_by_urn.get(child_urn)
if info:
typeinfo = info.get("type")
if typeinfo:
class_name = typeinfo[1]
cls = aff4.AFF4Object.classes.get(class_name)
if cls and "Container" not in cls.behaviours:
continue
self.AddElement(child_urn.RelativeName(urn))
except __HOLE__ as e:
self.message = "Error fetching %s: %s" % (urn, e) | IOError | dataset/ETHPy150Open google/grr/grr/gui/plugins/configuration_view.py/ConfigurationTree.RenderBranch |
def close(self):
"""Closes all files. If you put real :class:`file` objects into the
:attr:`files` dict you can call this method to automatically close
them all in one go.
"""
if self.closed:
return
try:
files = itervalues(self.files)
except __HOLE__:
files = ()
for f in files:
try:
f.close()
except Exception:
pass
self.closed = True | AttributeError | dataset/ETHPy150Open Eforcers/gae-flask-todo/lib/werkzeug/test.py/EnvironBuilder.close |
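The masked exception is `AttributeError`: when `self.files` is unset (e.g. `None`), calling `itervalues` on it fails, and the method falls back to closing nothing. The guard in isolation (using plain `.values()` in place of the `itervalues` compatibility helper):

```python
# Sketch only: a missing files mapping degrades to an empty iterable.
files = None                      # simulate a builder without files
try:
    handles = iter(files.values())
except AttributeError:
    handles = ()
for f in handles:
    f.close()                     # never reached in this sketch
print('closed')
```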
def error(self, obj, name, value):
"""Returns a descriptive error string."""
# pylint: disable=E1101
if self.low is None and self.high is None:
if self.units:
info = "a float having units compatible with '%s'" % self.units
else:
info = "a float"
elif self.low is not None and self.high is not None:
right = ']'
left = '['
if self.exclude_high is True:
right = ')'
if self.exclude_low is True:
left = '('
info = "a float in the range %s%s, %s%s" % \
(left, self.low, self.high, right)
elif self.low is not None:
info = "a float with a value > %s" % self.low
else: # self.high is not None
info = "a float with a value < %s" % self.high
vtype = type(value)
msg = "Variable '%s' must be %s, but a value of %s %s was specified." \
% (name, info, value, vtype)
try:
obj.raise_exception(msg, ValueError)
except __HOLE__:
raise ValueError(msg) | AttributeError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/datatypes/float.py/Float.error |
def _validate_with_metadata(self, obj, name, value, src_units):
"""Perform validation and unit conversion using metadata from
the source trait.
"""
# pylint: disable=E1101
dst_units = self.units
if isinstance(value, UncertainDistribution):
value = value.getvalue()
# FIXME: The try blocks testing whether the unit is bogus or undefined
# are generally redundant because that test is done at creation. HOWEVER
# you might have a case where it wasn't tested because it's technically
# not a float. NPSS wrapper may be such a case. A test needs to be
# constructed to test these lines.
try:
pq = PhysicalQuantity(value, src_units)
except NameError:
raise NameError("while setting value of %s: undefined unit '%s'" %
(name, src_units))
try:
pq.convert_to_unit(dst_units)
except __HOLE__:
raise NameError("undefined unit '%s' for variable '%s'" %
(dst_units, name))
except TypeError:
msg = "%s: units '%s' are incompatible " % (name, src_units) + \
"with assigning units of '%s'" % (dst_units)
raise TypeError(msg)
return pq.value | NameError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/datatypes/float.py/Float._validate_with_metadata |
def get_parent(self):
"""
Get the parent model. Returns None if current element is the root element.
"""
if self.parent_id:
for model in CascadeModelBase._get_cascade_elements():
try:
return model.objects.get(id=self.parent_id)
except __HOLE__:
pass | ObjectDoesNotExist | dataset/ETHPy150Open jrief/djangocms-cascade/cmsplugin_cascade/models_base.py/CascadeModelBase.get_parent |
@register.render_tag
def page_menu(context, token):
"""
Return a list of child pages for the given parent, storing all
pages in a dict in the context when first called using parents as keys
for retrieval on subsequent recursive calls from the menu template.
"""
# First arg could be the menu template file name, or the parent page.
# Also allow for both to be used.
template_name = None
parent_page = None
parts = token.split_contents()[1:]
for part in parts:
part = Variable(part).resolve(context)
if isinstance(part, str):
template_name = part
elif isinstance(part, Page):
parent_page = part
if template_name is None:
try:
template_name = context["menu_template_name"]
except __HOLE__:
error = "No template found for page_menu in: %s" % parts
raise TemplateSyntaxError(error)
context["menu_template_name"] = template_name
if "menu_pages" not in context:
try:
user = context["request"].user
slug = context["request"].path
except KeyError:
user = None
slug = ""
num_children = lambda id: lambda: len(context["menu_pages"][id])
has_children = lambda id: lambda: num_children(id)() > 0
rel = [m.__name__.lower()
for m in Page.get_content_models()
if not m._meta.proxy]
published = Page.objects.published(for_user=user).select_related(*rel)
# Store the current page being viewed in the context. Used
# for comparisons in page.set_menu_helpers.
if "page" not in context:
try:
context.dicts[0]["_current_page"] = published.exclude(
content_model="link").get(slug=slug)
except Page.DoesNotExist:
context.dicts[0]["_current_page"] = None
elif slug:
context.dicts[0]["_current_page"] = context["page"]
# Some homepage related context flags. on_home is just a helper
# indicated we're on the homepage. has_home indicates an actual
# page object exists for the homepage, which can be used to
# determine whether or not to show a hard-coded homepage link
# in the page menu.
home = home_slug()
context.dicts[0]["on_home"] = slug == home
context.dicts[0]["has_home"] = False
# Maintain a dict of page IDs -> parent IDs for fast
# lookup in setting page.is_current_or_ascendant in
# page.set_menu_helpers.
context.dicts[0]["_parent_page_ids"] = {}
pages = defaultdict(list)
for page in published.order_by("_order"):
page.set_helpers(context)
context["_parent_page_ids"][page.id] = page.parent_id
setattr(page, "num_children", num_children(page.id))
setattr(page, "has_children", has_children(page.id))
pages[page.parent_id].append(page)
if page.slug == home:
context.dicts[0]["has_home"] = True
# Include menu_pages in all contexts, not only in the
# block being rendered.
context.dicts[0]["menu_pages"] = pages
# ``branch_level`` must be stored against each page so that the
# calculation of it is correctly applied. This looks weird but if we do
# the ``branch_level`` as a separate arg to the template tag with the
# addition performed on it, the addition occurs each time the template
# tag is called rather than once per level.
context["branch_level"] = 0
parent_page_id = None
if parent_page is not None:
context["branch_level"] = getattr(parent_page, "branch_level", 0) + 1
parent_page_id = parent_page.id
# Build the ``page_branch`` template variable, which is the list of
# pages for the current parent. Here we also assign the attributes
# to the page object that determines whether it belongs in the
# current menu template being rendered.
context["page_branch"] = context["menu_pages"].get(parent_page_id, [])
context["page_branch_in_menu"] = False
for page in context["page_branch"]:
page.in_menu = page.in_menu_template(template_name)
page.num_children_in_menu = 0
if page.in_menu:
context["page_branch_in_menu"] = True
for child in context["menu_pages"].get(page.id, []):
if child.in_menu_template(template_name):
page.num_children_in_menu += 1
page.has_children_in_menu = page.num_children_in_menu > 0
page.branch_level = context["branch_level"]
page.parent = parent_page
context["parent_page"] = page.parent
# Prior to pages having the ``in_menus`` field, pages had two
# boolean fields ``in_navigation`` and ``in_footer`` for
# controlling menu inclusion. Attributes and variables
# simulating these are maintained here for backwards
# compatibility in templates, but will be removed eventually.
page.in_navigation = page.in_menu
page.in_footer = not (not page.in_menu and "footer" in template_name)
if page.in_navigation:
context["page_branch_in_navigation"] = True
if page.in_footer:
context["page_branch_in_footer"] = True
t = get_template(template_name)
return t.render(Context(context)) | KeyError | dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/pages/templatetags/pages_tags.py/page_menu |
@register.render_tag
def set_page_permissions(context, token):
"""
Assigns a permissions dict to the given page instance, combining
Django's permission for the page's model and a permission check
against the instance itself calling the page's ``can_add``,
``can_change`` and ``can_delete`` custom methods.
Used within the change list for pages, to implement permission
checks for the navigation tree.
"""
page = context[token.split_contents()[1]]
model = page.get_content_model()
try:
opts = model._meta
except __HOLE__:
if model is None:
error = _("Could not load the model for the following page, "
"was it removed?")
obj = page
else:
# A missing inner Meta class usually means the Page model
# hasn't been directly subclassed.
error = _("An error occured with the following class. Does "
"it subclass Page directly?")
obj = model.__class__.__name__
raise ImproperlyConfigured(error + " '%s'" % obj)
perm_name = opts.app_label + ".%s_" + opts.object_name.lower()
request = context["request"]
setattr(page, "perms", {})
for perm_type in ("add", "change", "delete"):
perm = request.user.has_perm(perm_name % perm_type)
perm = perm and getattr(model, "can_%s" % perm_type)(request)
page.perms[perm_type] = perm
return "" | AttributeError | dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/pages/templatetags/pages_tags.py/set_page_permissions |
def wikipedia_url_parse(url):
seed_page = "https://en.wikipedia.org" #Crawling the English Wikipedia
try:
from urllib.parse import urlparse
except __HOLE__:
from urlparse import urlparse
url = url #.lower() #Make it lower case
s = urlparse(url) #parse the given url
seed_page_n = seed_page #.lower() #Make it lower case
#t = urlparse(seed_page_n) #parse the seed page (reference page)
i = 0
flag = 0
while i<=9:
if url == "/":
url = seed_page_n
flag = 0
elif not s.scheme:
url = "http://" + url
flag = 0
elif "#" in url:
url = url[:url.find("#")]
flag = 0
elif "?" in url:
url = url[:url.find("?")]
flag = 0
elif s.netloc == "":
url = seed_page + s.path
flag = 0
elif url[len(url)-1] == "/":
url = url[:-1]
flag = 0
else:
url = url
flag = 0
break
i = i+1
s = urlparse(url) #Parse after every loop to update the values of url parameters
return(url, flag)
#Main crawl function that calls all of the above functions and crawls the entire site sequentially | ImportError | dataset/ETHPy150Open hardikvasa/webb/webb/webb.py/wikipedia_url_parse
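The masked exception is `ImportError`: `urllib.parse` only exists on Python 3, so the import fails on Python 2 and falls back to `urlparse`. The normalisation loop can be exercised on a relative, fragment-bearing link:

```python
# Sketch only: normalise a relative Wikipedia link the way the loop does.
try:
    from urllib.parse import urlparse
except ImportError:               # Python 2 fallback, the masked exception
    from urlparse import urlparse

seed_page = "https://en.wikipedia.org"
url = "/wiki/Web_crawler#History"

if "#" in url:
    url = url[:url.find("#")]     # drop the fragment
s = urlparse(url)
if s.netloc == "":
    url = seed_page + s.path      # make the link absolute
print(url)  # https://en.wikipedia.org/wiki/Web_crawler
```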
def get_product_price_gross(self, request):
"""
Returns the product item price. Based on selected properties, etc.
"""
if not self.product.is_configurable_product():
price = self.product.get_price_gross(request, amount=self.amount)
else:
if self.product.active_price_calculation:
try:
price = self.get_calculated_price(request)
except:
price = self.product.get_price_gross(request, amount=self.amount)
else:
price = self.product.get_price_gross(request, with_properties=False, amount=self.amount)
for property in self.properties.all():
if property.property.is_select_field:
try:
option = PropertyOption.objects.get(pk=int(float(property.value)))
except (PropertyOption.DoesNotExist, AttributeError, ValueError):
pass
else:
try:
option_price = float(option.price)
except (__HOLE__, ValueError):
pass
else:
if not self.product.price_includes_tax(request):
option_price = option_price * ((100 + self.product.get_tax_rate(request)) / 100)
price += option_price
return price | TypeError | dataset/ETHPy150Open diefenbach/django-lfs/lfs/cart/models.py/CartItem.get_product_price_gross |
def get_properties(self):
"""
Returns properties of the cart item. Resolves option names for select
fields.
"""
properties = []
for prop_dict in self.product.get_properties():
prop = prop_dict['property']
property_group = prop_dict['property_group']
price = ""
try:
cipv = CartItemPropertyValue.objects.get(cart_item=self,
property=prop,
property_group=property_group)
except CartItemPropertyValue.DoesNotExist:
continue
if prop.is_select_field:
try:
option = PropertyOption.objects.get(pk=int(float(cipv.value)))
except (PropertyOption.DoesNotExist, __HOLE__):
value = cipv.value
price = 0.0
else:
value = option.name
price = option.price
elif prop.is_number_field:
format_string = "%%.%sf" % prop.decimal_places
try:
value = format_string % float(cipv.value)
except ValueError:
value = locale.format("%.2f", float(cipv.value))
else:
value = cipv.value
properties.append({
"name": prop.name,
"title": prop.title,
"unit": prop.unit,
"display_price": prop.display_price,
"value": value,
"price": price,
"obj": prop,
"property_group": property_group,
"property_group_name": property_group.name
})
properties = sorted(properties, key=lambda x: '{0}-{1}'.format(x['property_group_name'], x['obj'].position))
return properties | ValueError | dataset/ETHPy150Open diefenbach/django-lfs/lfs/cart/models.py/CartItem.get_properties |
def stub_if_missing_deps(*deps):
"""A class decorator that will try to import the specified modules and in
the event of failure will stub out the class, raising a RuntimeError that
explains the missing dependencies whenever an attempt is made to
instantiate the class.
deps: str args
args in deps may have the form a.b.c or a.b.c:attr, where attr would be
searched for within the module a.b.c after a.b.c is successfully imported.
"""
def _find_failed_imports():
failed = []
for dep in deps:
parts = dep.split(':')
modname = parts[0]
attrname = parts[1] if len(parts)>1 else None
try:
__import__(modname)
except __HOLE__ as err:
failed.append(str(err).split()[-1])
continue
if attrname and not hasattr(sys.modules[modname], attrname):
failed.append('.'.join([modname, attrname]))
return failed
def _stub_if_missing(obj):
failed = _find_failed_imports()
if failed:
if isclass(obj):
def _error(obj, *args, **kwargs):
msg = "The %s class depends on the following modules or attributes which were not found on your system: %s"
raise RuntimeError(msg % (obj.__name__, failed))
obj.__new__ = staticmethod(_error)
elif isfunction(obj):
body = "raise RuntimeError(\"The %s function depends on the following modules or attributes which were not found on your system: %s\")"
return replace_funct(obj, body % (obj.__name__, failed))
return obj
return _stub_if_missing | ImportError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.util/src/openmdao/util/decorators.py/stub_if_missing_deps |
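Each dependency spec is either `a.b.c` or `a.b.c:attr`; the attribute part is checked on the imported module. The parsing step in isolation, using a standard-library module as the example spec:

```python
# Sketch only: parse one dependency spec the way _find_failed_imports does.
import sys

dep = 'os.path:join'                      # example spec, stdlib only
parts = dep.split(':')
modname = parts[0]
attrname = parts[1] if len(parts) > 1 else None

__import__(modname)
print(hasattr(sys.modules[modname], attrname))  # True -> not a failed import
```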
def test_sequential_flow_interrupted_externally(self):
flow = lf.Flow('flow-1').add(
utils.ProgressingTask(name='task1'),
utils.ProgressingTask(name='task2'),
utils.ProgressingTask(name='task3'),
)
engine = self._make_engine(flow)
def _run_engine_and_raise():
engine_states = {}
engine_it = engine.run_iter()
while True:
try:
engine_state = six.next(engine_it)
if engine_state not in engine_states:
engine_states[engine_state] = 1
else:
engine_states[engine_state] += 1
if engine_states.get(states.SCHEDULING) == 2:
engine_state = engine_it.throw(IOError("I Broke"))
if engine_state not in engine_states:
engine_states[engine_state] = 1
else:
engine_states[engine_state] += 1
except __HOLE__:
break
self.assertRaises(IOError, _run_engine_and_raise)
self.assertEqual(states.FAILURE, engine.storage.get_flow_state()) | StopIteration | dataset/ETHPy150Open openstack/taskflow/taskflow/tests/unit/test_engines.py/EngineLinearFlowTest.test_sequential_flow_interrupted_externally |
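The test above drives the engine's run_iter() generator and injects a failure with throw(). A standalone sketch of the generator mechanics it relies on, in plain Python with no taskflow:

def steps():
    try:
        yield 'SCHEDULING'
        yield 'WAITING'
    except IOError:
        yield 'FAILURE'  # handle the injected error and report a state...
        raise            # ...then re-raise so the caller sees it too

it = steps()
print(next(it))           # 'SCHEDULING'
print(it.throw(IOError))  # raised inside the generator; it yields 'FAILURE'
# A further next(it) re-raises the IOError to the caller.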
def test_sequential_flow_iter_suspend_resume(self):
flow = lf.Flow('flow-2').add(
utils.ProgressingTask(name='task1'),
utils.ProgressingTask(name='task2')
)
lb, fd = p_utils.temporary_flow_detail(self.backend)
engine = self._make_engine(flow, flow_detail=fd)
with utils.CaptureListener(engine, capture_flow=False) as capturer:
it = engine.run_iter()
gathered_states = []
suspend_it = None
while True:
try:
s = it.send(suspend_it)
gathered_states.append(s)
if s == states.WAITING:
# Stop it before task2 runs/starts.
suspend_it = True
except __HOLE__:
break
self.assertTrue(len(gathered_states) > 0)
expected = ['task1.t RUNNING', 'task1.t SUCCESS(5)']
self.assertEqual(expected, capturer.values)
self.assertEqual(states.SUSPENDED, engine.storage.get_flow_state())
# Attempt to resume it and see what runs now...
with utils.CaptureListener(engine, capture_flow=False) as capturer:
gathered_states = list(engine.run_iter())
self.assertTrue(len(gathered_states) > 0)
expected = ['task2.t RUNNING', 'task2.t SUCCESS(5)']
self.assertEqual(expected, capturer.values)
self.assertEqual(states.SUCCESS, engine.storage.get_flow_state()) | StopIteration | dataset/ETHPy150Open openstack/taskflow/taskflow/tests/unit/test_engines.py/EngineLinearFlowTest.test_sequential_flow_iter_suspend_resume |
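The suspend path above feeds values back into the generator with send(); whatever is passed becomes the result of the paused yield expression. A standalone sketch:

def runner():
    state = 'RUNNING'
    while True:
        suspend = (yield state)  # value passed to send() lands here
        if suspend:
            yield 'SUSPENDED'
            return
        state = 'WAITING'

it = runner()
print(next(it))       # 'RUNNING' (primes the generator)
print(it.send(None))  # 'WAITING'
print(it.send(True))  # 'SUSPENDED'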
def __init__(self, *args, **kwargs):
super(ListResource, self).__init__(*args, **kwargs)
try:
self.key
except __HOLE__:
self.key = self.name.lower() | AttributeError | dataset/ETHPy150Open awslabs/lambda-apigateway-twilio-tutorial/twilio/rest/resources/base.py/ListResource.__init__ |
def main():
args = parser.parse_args()
# Generate Heroku app name.
app_name = args.app or args.project_name.replace("_", "-")
# Create the project.
try:
os.makedirs(args.dest_dir)
except __HOLE__:
pass
management.call_command("startproject",
args.project_name,
args.dest_dir,
template = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "project_template")),
extensions = ("py", "txt", "slugignore", "conf", "gitignore", "sh",),
app_name = app_name,
user = getpass.getuser(),
)
# Make management scripts executable.
make_executable(os.path.join(args.dest_dir, "manage.py"))
# Audit and configure the project for Heroku.
audit_args = ["python", os.path.join(args.dest_dir, "manage.py"), "heroku_audit", "--fix"]
if not args.interactive:
audit_args.append("--noinput")
audit_returncode = subprocess.call(audit_args)
if audit_returncode != 0:
sys.exit(audit_returncode)
# Give some help to the user.
print "Heroku project created." | OSError | dataset/ETHPy150Open etianen/django-herokuapp/herokuapp/bin/herokuapp_startproject.py/main |
def _authenticate_keystone(self):
if self.user_id:
creds = {'userId': self.user_id,
'password': self.password}
else:
creds = {'username': self.username,
'password': self.password}
if self.tenant_id:
body = {'auth': {'passwordCredentials': creds,
'tenantId': self.tenant_id, }, }
else:
body = {'auth': {'passwordCredentials': creds,
'tenantName': self.tenant_name, }, }
if self.auth_url is None:
raise exceptions.NoAuthURLProvided()
token_url = self.auth_url + "/tokens"
resp, resp_body = self._cs_request(token_url, "POST",
body=json.dumps(body),
content_type="application/json",
allow_redirects=True)
if resp.status_code != 200:
raise exceptions.Unauthorized(message=resp_body)
if resp_body:
try:
resp_body = json.loads(resp_body)
except __HOLE__:
pass
else:
resp_body = None
self._extract_service_catalog(resp_body) | ValueError | dataset/ETHPy150Open openstack/python-neutronclient/neutronclient/client.py/HTTPClient._authenticate_keystone |
def request(self, *args, **kwargs):
kwargs.setdefault('authenticated', False)
kwargs.setdefault('raise_exc', False)
content_type = kwargs.pop('content_type', None) or 'application/json'
headers = kwargs.setdefault('headers', {})
headers.setdefault('Accept', content_type)
try:
kwargs.setdefault('data', kwargs.pop('body'))
except __HOLE__:
pass
if kwargs.get('data'):
headers.setdefault('Content-Type', content_type)
resp = super(SessionClient, self).request(*args, **kwargs)
return resp, resp.text | KeyError | dataset/ETHPy150Open openstack/python-neutronclient/neutronclient/client.py/SessionClient.request |
def get_auth_info(self):
auth_info = {'auth_token': self.auth_token,
'endpoint_url': self.endpoint_url}
# NOTE(jamielennox): This is the best we can do here. It will work
# with identity plugins, which is the primary case, but we should
# deprecate its usage as much as possible.
try:
get_access = (self.auth or self.session.auth).get_access
except __HOLE__:
pass
else:
auth_ref = get_access(self.session)
auth_info['auth_tenant_id'] = auth_ref.project_id
auth_info['auth_user_id'] = auth_ref.user_id
return auth_info
# FIXME(bklei): Should refactor this to use kwargs and only
# explicitly list arguments that are not None. | AttributeError | dataset/ETHPy150Open openstack/python-neutronclient/neutronclient/client.py/SessionClient.get_auth_info |
def get_review_request(self):
"""Return the ReviewRequest that this file is attached to."""
if hasattr(self, '_review_request'):
return self._review_request
try:
return self.review_request.all()[0]
except IndexError:
try:
return self.inactive_review_request.all()[0]
except __HOLE__:
# Maybe it's on a draft.
try:
draft = self.drafts.get()
except ObjectDoesNotExist:
draft = self.inactive_drafts.get()
return draft.review_request | IndexError | dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/attachments/models.py/FileAttachment.get_review_request |
def cleanse_setting(key, value):
"""Cleanse an individual setting key/value of sensitive content.
If the value is a dictionary, recursively cleanse the keys in
that dictionary.
"""
try:
if HIDDEN_SETTINGS.search(key):
cleansed = '********************'
else:
if isinstance(value, dict):
cleansed = dict((k, cleanse_setting(k, v)) for k,v in value.items())
else:
cleansed = value
except __HOLE__:
# If the key isn't regex-able, just return as-is.
cleansed = value
return cleansed | TypeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/views/debug.py/cleanse_setting |
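A quick illustration of cleanse_setting above. HIDDEN_SETTINGS is assumed to be the module-level regex of sensitive name fragments (roughly re.compile('SECRET|PASSWORD|...')):

print(cleanse_setting('SECRET_KEY', 'abc123'))
# '********************'
print(cleanse_setting('DEBUG', True))
# True (non-sensitive keys pass through unchanged)
print(cleanse_setting('DATABASES', {'PASSWORD': 'hunter2', 'NAME': 'db'}))
# {'PASSWORD': '********************', 'NAME': 'db'}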
def get_traceback_html(self):
"Return HTML code for traceback."
if issubclass(self.exc_type, TemplateDoesNotExist):
from django.template.loader import template_source_loaders
self.template_does_not_exist = True
self.loader_debug_info = []
for loader in template_source_loaders:
try:
module = import_module(loader.__module__)
if hasattr(loader, '__class__'):
source_list_func = loader.get_template_sources
else: # NOTE: Remember to remove this branch when we deprecate old template loaders in 1.4
source_list_func = module.get_template_sources
# NOTE: This assumes exc_value is the name of the template that
# the loader attempted to load.
template_list = [{'name': t, 'exists': os.path.exists(t)} \
for t in source_list_func(str(self.exc_value))]
except (__HOLE__, AttributeError):
template_list = []
if hasattr(loader, '__class__'):
loader_name = loader.__module__ + '.' + loader.__class__.__name__
else: # NOTE: Remember to remove this branch when we deprecate old template loaders in 1.4
loader_name = loader.__module__ + '.' + loader.__name__
self.loader_debug_info.append({
'loader': loader_name,
'templates': template_list,
})
if (settings.TEMPLATE_DEBUG and hasattr(self.exc_value, 'source') and
isinstance(self.exc_value, TemplateSyntaxError)):
self.get_template_exception_info()
frames = self.get_traceback_frames()
unicode_hint = ''
if issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = smart_unicode(unicode_str[max(start-5, 0):min(end+5, len(unicode_str))], 'ascii', errors='replace')
from django import get_version
t = Template(TECHNICAL_500_TEMPLATE, name='Technical 500 template')
c = Context({
'exception_type': self.exc_type.__name__,
'exception_value': smart_unicode(self.exc_value, errors='replace'),
'unicode_hint': unicode_hint,
'frames': frames,
'lastframe': frames[-1],
'request': self.request,
'settings': get_safe_settings(),
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'server_time': datetime.datetime.now(),
'django_version_info': get_version(),
'sys_path' : sys.path,
'template_info': self.template_info,
'template_does_not_exist': self.template_does_not_exist,
'loader_debug_info': self.loader_debug_info,
})
return t.render(c) | ImportError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/views/debug.py/ExceptionReporter.get_traceback_html |
def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
source = None
if loader is not None and hasattr(loader, "get_source"):
source = loader.get_source(module_name)
if source is not None:
source = source.splitlines()
if source is None:
try:
f = open(filename)
try:
source = f.readlines()
finally:
f.close()
except (__HOLE__, IOError):
pass
if source is None:
return None, [], None, []
encoding = 'ascii'
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (http://www.python.org/dev/peps/pep-0263/)
match = re.search(r'coding[:=]\s*([-\w.]+)', line)
if match:
encoding = match.group(1)
break
source = [unicode(sline, encoding, 'replace') for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
pre_context = [line.strip('\n') for line in source[lower_bound:lineno]]
context_line = source[lineno].strip('\n')
post_context = [line.strip('\n') for line in source[lineno+1:upper_bound]]
return lower_bound, pre_context, context_line, post_context | OSError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/views/debug.py/ExceptionReporter._get_lines_from_file |
def technical_404_response(request, exception):
"Create a technical 404 error response. The exception should be the Http404."
try:
tried = exception.args[0]['tried']
except (IndexError, __HOLE__, KeyError):
tried = []
else:
if not tried:
# tried exists but is an empty list. The URLconf must've been empty.
return empty_urlconf(request)
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
t = Template(TECHNICAL_404_TEMPLATE, name='Technical 404 template')
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': request.path_info[1:], # Trim leading slash
'urlpatterns': tried,
'reason': smart_str(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
})
return HttpResponseNotFound(t.render(c), mimetype='text/html') | TypeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/views/debug.py/technical_404_response |
def _add_thread_exception(self, thread):
"""Create a StreamRuntimeError exception object and fill attributes with all necessary
values.
"""
node = thread.node
exception = StreamRuntimeError(node=node, exception=thread.exception)
exception.traceback = thread.traceback
exception.inputs = [pipe.fields for pipe in node.inputs]
if not isinstance(node, TargetNode):
try:
exception.outputs = node.output_fields
except:
pass
node_info = node.node_info
attrs = {}
if node_info and "attributes" in node_info:
for attribute in node_info["attributes"]:
attr_name = attribute.get("name")
if attr_name:
try:
value = getattr(node, attr_name)
except __HOLE__:
value = "<attribute %s does not exist>" % attr_name
except Exception, e:
value = e
attrs[attr_name] = value
exception.attributes = attrs
self.exceptions.append(exception) | AttributeError | dataset/ETHPy150Open Stiivi/brewery/brewery/streams.py/Stream._add_thread_exception |
def handle(self, *args, **options):
super(Command, self).handle(*args, **options)
"""
Connect to Github using token stored in environment, loop over model fields, and \
create an issue for any choice field missing
"""
self.dry_run = options["dry_run"]
# set up connect to Github account
self.gh = Github(os.getenv('GITHUB_TOKEN'))
self.org = self.gh.get_organization("california-civic-data-coalition")
self.repo = self.org.get_repo("django-calaccess-raw-data")
self.labels = [
self.repo.get_label("small"),
self.repo.get_label("documentation"),
self.repo.get_label("enhancement"),
]
self.header(
"Creating GitHub issues for model choice fields"
)
model_list = sorted(
get_model_list(),
key=lambda x: (x().klass_group, x().klass_name)
)
models_to_fix = []
for m in model_list:
fields_to_fix = {}
for f in m._meta.fields:
if f.name == 'id':
continue
# test for verbose name
if not f.__dict__['_verbose_name']:
fields_to_fix[f] = {'no_verbose': True, 'no_help': False}
elif len(f.__dict__['_verbose_name']) == 0:
fields_to_fix[f] = {'no_verbose': True, 'no_help': False}
# test for help text
if len(f.help_text) == 0:
try:
fields_to_fix[f]['no_help'] = True
except __HOLE__:
fields_to_fix[f] = {'no_verbose': False, 'no_help': True}
if len(fields_to_fix) > 0:
fs = []
for k, v in fields_to_fix.items():
fs.append((k, v))
models_to_fix.append(
(m, tuple(fs))
)
for model, fields in models_to_fix:
context = dict(
model_name=model.__name__,
model_docs=model().DOCUMENTCLOUD_PAGES,
file_name=model.__module__.split('.')[-1] + '.py',
fields=fields,
)
title = "Add verbose and/or help text fields on {model_name} (in \
{file_name})".format(**context)
body = render_to_string(
'toolbox/createverboseandhelptextissues.md',
context,
)
self.log("-- Creating issue for {model_name}".format(**context))
if self.dry_run:
print '=========================='
print title
print '--------------------------'
print body
print '=========================='
else:
self.repo.create_issue(
title,
body=body,
labels=self.labels,
)
time.sleep(2.5) | KeyError | dataset/ETHPy150Open california-civic-data-coalition/django-calaccess-raw-data/example/toolbox/management/commands/createverboseandhelptextissues.py/Command.handle |
def get_text_object_region(view, s, text_object, inclusive=False, count=1):
try:
delims, type_ = PAIRS[text_object]
except __HOLE__:
return s
if type_ == TAG:
begin_tag, end_tag, _ = find_containing_tag(view, s.b)
if inclusive:
return sublime.Region(begin_tag.a, end_tag.b)
else:
return sublime.Region(begin_tag.b, end_tag.a)
if type_ == PARAGRAPH:
return find_paragraph_text_object(view, s, inclusive=inclusive, count=count)
if type_ == BRACKET:
b = resolve_insertion_point_at_b(s)
opening = find_prev_lone_bracket(view, b, delims)
closing = find_next_lone_bracket(view, b, delims)
if not (opening and closing):
return s
if inclusive:
return sublime.Region(opening.a, closing.b)
return sublime.Region(opening.a + 1, closing.b - 1)
if type_ == QUOTE:
# Vim only operates on the current line.
line = view.line(s)
# FIXME: Escape sequences like \" are probably syntax-dependant.
prev_quote = reverse_search_by_pt(view, r'(?<!\\\\)' + delims[0],
start=line.a, end=s.b)
next_quote = find_in_range(view, r'(?<!\\\\)' + delims[0],
start=s.b, end=line.b)
if next_quote and not prev_quote:
prev_quote = next_quote
next_quote = find_in_range(view, r'(?<!\\\\)' + delims[0],
start=prev_quote.b, end=line.b)
if not (prev_quote and next_quote):
return s
if inclusive:
return sublime.Region(prev_quote.a, next_quote.b)
return sublime.Region(prev_quote.a + 1, next_quote.b - 1)
if type_ == WORD:
w = a_word(view, s.b, inclusive=inclusive, count=count)
if not w:
return s
if s.size() <= 1:
return w
return sublime.Region(s.a, w.b)
if type_ == BIG_WORD:
w = a_big_word(view, s.b, inclusive=inclusive, count=count)
if not w:
return s
if s.size() <= 1:
return w
return sublime.Region(s.a, w.b)
if type_ == SENTENCE:
# FIXME: This doesn't work well.
# TODO: Improve this.
sentence_start = view.find_by_class(s.b,
forward=False,
classes=sublime.CLASS_EMPTY_LINE)
sentence_start_2 = reverse_search_by_pt(view, r'[.?!:]\s+|[.?!:]$',
start=0,
end=s.b)
if sentence_start_2:
sentence_start = (sentence_start + 1 if (sentence_start >
sentence_start_2.b)
else sentence_start_2.b)
else:
sentence_start = sentence_start + 1
sentence_end = find_in_range(view, r'([.?!:)](?=\s))|([.?!:)]$)',
start=s.b,
end=view.size())
if not (sentence_end):
return s
if inclusive:
return sublime.Region(sentence_start, sentence_end.b)
else:
return sublime.Region(sentence_start, sentence_end.b)
return s | KeyError | dataset/ETHPy150Open guillermooo/Vintageous/vi/text_objects.py/get_text_object_region |
def get_sha1_params_url(path_info):
"""Returns the sha1 of requested page and params app.
"""
# if settings.DEBUG:
# print "## get_sha1_params_url() :"
slug = ''
params = ''
params_admin = '/'
index = 0
separator = False
error = False
admin = False
# We delete the first and last '/' for the next loop.
items = path_info[1:]
try:
if items[-1] == u'/':
items = items[:-1]
except IndexError:
pass
items = items.split('/')
# if settings.DEBUG:
# print "ITEMS : ", items
# Detection of url params
for item in items:
if len(item) < settings.SLUG_MIN_SIZE:
# Separator found => next items are the url app
separator = items[index]
break
else:
# This item still concerns the page slug, we continue the loop
index += 1
# We compute slug and params
if separator:
# Page slug
slug = '/' + '/'.join(items[:index])
# We check if there is a admin url
# in the rest of the params
if separator != settings.URL_ADMIN_SEP:
try:
wa_index = items[index+1:].index(settings.URL_ADMIN_SEP)
# Update separator
separator = settings.URL_ADMIN_SEP
# if settings.DEBUG:
# print "Computing params_admin :"
# print "index : ", index
# print "wa_index : ", wa_index
params = '/' + '/'.join(items[index+1:index+1+wa_index])
params_admin = '/' + '/'.join(items[index+1:][wa_index+1:])
except __HOLE__:
params_admin = '/'
else:
params_admin = '/' + '/'.join(items[index+1:])
params = '/'
# If problems..
if not params:
params = '/' + '/'.join(items[index+1:])
# if settings.DEBUG:
# print "## Separator found : ", separator
# FIXME: Reorganize separator validity...
# Check separator validity
if separator == settings.URL_PAGE_APP_SEP:
if params == '/':
# URL has the params separator but no params
error = True
elif separator == settings.URL_ADMIN_SEP:
# admin URL
admin = True
# Empty admin params => error
if params_admin == '/':
error = True
else:
# Invalid separator
error = True
else:
# if settings.DEBUG:
# print "## Separator not found"
slug = path_info
params = u'/'
# Add the final '/' if missing
if slug[-1] != u'/' :
slug += u'/'
if params[-1] != u'/' :
params += u'/'
if params_admin[-1] != u'/' :
params_admin += u'/'
# Computing sha1
sha1_slug = sha1(slug).hexdigest()
# if settings.DEBUG:
# print "slug : ", slug, " => ", sha1_slug
# print "params : ", params
# print "params admin : ", params_admin
# print "##"
return {'sha1': sha1_slug,
'params': params,
'error': error,
'admin': admin,
'params_admin': params_admin} | ValueError | dataset/ETHPy150Open ionyse/ionyweb/ionyweb/website/utils.py/get_sha1_params_url |
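A hedged sketch of get_sha1_params_url above, assuming settings.SLUG_MIN_SIZE is small enough that 'about' counts as a slug segment and no separator appears in the path; under those assumptions the function reduces to hashing the normalized slug:

from hashlib import sha1

info = get_sha1_params_url('/about')
assert info['sha1'] == sha1('/about/').hexdigest()  # trailing '/' is appended
assert info['params'] == '/' and not info['error'] and not info['admin']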
def _resolve_request(self, url_path, format):
# custom provider
if self.application.custom_provider is not None:
response = self.application.custom_provider(self.request)
if response is not None:
return self._write_response(response)
# get request data
self.format = self._get_format(format)
method = self.request.method
self.status_code = int(self.get_argument("__statusCode", 200))
# upstream server
upstream_server = self.api_data.get_upstream_server(
"%s-%s" % (method, url_path))
if self.api_data.upstream_server and upstream_server:
return self._handle_request_on_upstream()
# mock
provider = rest.FilesMockProvider(self.api_dir)
response = rest.resolve_request(
provider, method, url_path, self.status_code, self.format)
if provider.error:
if self.api_data.upstream_server:
return self._handle_request_on_upstream()
# set content type
already_content_type = [
item[0] for item in response.headers
if item[0] == 'Content-Type']
if not already_content_type:
content_type = SUPPORTED_FORMATS[self.format][0]
response.headers.append(
("Content-Type", "%s; charset=utf-8" % content_type))
response.headers.append(
("Access-Control-Allow-Origin", "*"))
# log request
self.log_request(response)
# response
try:
self.set_status(response.status_code)
except __HOLE__:
self._reason = 'Custom status code'
self.set_headers(response.headers)
self.write(response.content)
self.finish() | ValueError | dataset/ETHPy150Open tomashanacek/mock-server/mock_server/handlers.py/MainHandler._resolve_request |
def _skip_row(self, row):
if row == []:
return True
if row[0] == 'Total':
return True
if row[0] == 'Official Totals':
return True
if isinstance(row[0], float):
return False
if row[0].strip() == '':
return True
if row[0].startswith("* The State Canvassing Board"):
return True
if row[0].replace('\n',' ').strip() in self.target_offices:
return True
# if the second cell cannot be parsed as a float, skip the row
try:
float(row[1])
return False
except ValueError:
return True
except __HOLE__:
return True | IndexError | dataset/ETHPy150Open openelections/openelections-core/openelex/us/wy/load.py/WYLoader._skip_row |
def _votes(self, val):
"""
Returns the vote count as an int, or None if the value is non-numeric.
"""
try:
return int(float(val))
except __HOLE__:
# Couldn't convert the value from a string
return None | ValueError | dataset/ETHPy150Open openelections/openelections-core/openelex/us/wy/load.py/WYLoader._votes |
def _writein(self, row):
# sometimes write-in field not present
try:
write_in = row['Write-In?'].strip()
except __HOLE__:
write_in = None
return write_in | KeyError | dataset/ETHPy150Open openelections/openelections-core/openelex/us/wy/load.py/WYLoader._writein |
def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613
'''
Search the environment for the relative path
'''
fnd = {'path': '', 'rel': ''}
if os.path.isabs(path):
return fnd
if tgt_env not in envs():
return fnd
if os.path.basename(path) == 'top.sls':
log.debug('minionfs will NOT serve top.sls '
'for security reasons (path requested: {0})'.format(path))
return fnd
mountpoint = salt.utils.url.strip_proto(__opts__['minionfs_mountpoint'])
# Remove the mountpoint to get the "true" path
path = path[len(mountpoint):].lstrip(os.path.sep)
try:
minion, pushed_file = path.split(os.sep, 1)
except __HOLE__:
return fnd
if not _is_exposed(minion):
return fnd
full = os.path.join(
__opts__['cachedir'], 'minions', minion, 'files', pushed_file
)
if os.path.isfile(full) \
and not salt.fileserver.is_file_ignored(__opts__, full):
fnd['path'] = full
fnd['rel'] = path
fnd['stat'] = list(os.stat(full))
return fnd
return fnd | ValueError | dataset/ETHPy150Open saltstack/salt/salt/fileserver/minionfs.py/find_file |
def file_hash(load, fnd):
'''
Return a file hash, the hash type is set in the master config file
'''
path = fnd['path']
ret = {}
if 'env' in load:
salt.utils.warn_until(
'Oxygen',
'Parameter \'env\' has been detected in the argument list. This '
'parameter is no longer used and has been replaced by \'saltenv\' '
'as of Salt Carbon. This warning will be removed in Salt Oxygen.'
)
load.pop('env')
if load['saltenv'] not in envs():
return {}
# if the file doesn't exist, we can't get a hash
if not path or not os.path.isfile(path):
return ret
# set the hash_type as it is determined by config, so the mechanism won't change it
ret['hash_type'] = __opts__['hash_type']
# check if the hash is cached
# cache file's contents should be "hash:mtime"
cache_path = os.path.join(
__opts__['cachedir'],
'minionfs/hash',
load['saltenv'],
'{0}.hash.{1}'.format(fnd['rel'], __opts__['hash_type'])
)
# if we have a cache, serve that if the mtime hasn't changed
if os.path.exists(cache_path):
try:
with salt.utils.fopen(cache_path, 'rb') as fp_:
try:
hsum, mtime = fp_.read().split(':')
except __HOLE__:
log.debug(
'Fileserver attempted to read incomplete cache file. '
'Retrying.'
)
file_hash(load, fnd)
return ret
if str(os.path.getmtime(path)) == mtime:
# the cached mtime is stored as a string, so compare string to string
ret['hsum'] = hsum
return ret
# Can't use Python select() because we need Windows support
except os.error:
log.debug(
'Fileserver encountered lock when reading cache file. '
'Retrying.'
)
file_hash(load, fnd)
return ret
# if we don't have a cache entry, let's make one
ret['hsum'] = salt.utils.get_hash(path, __opts__['hash_type'])
cache_dir = os.path.dirname(cache_path)
# make cache directory if it doesn't exist
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# save the cache object "hash:mtime"
cache_object = '{0}:{1}'.format(ret['hsum'], os.path.getmtime(path))
with salt.utils.flopen(cache_path, 'w') as fp_:
fp_.write(cache_object)
return ret | ValueError | dataset/ETHPy150Open saltstack/salt/salt/fileserver/minionfs.py/file_hash |
@staticmethod
def get_pyaudio():
"""
Imports the pyaudio module and checks its version. Throws exceptions if pyaudio can't be found or a wrong version is installed
"""
try:
import pyaudio
except __HOLE__:
raise AttributeError("Could not find PyAudio; check installation")
from distutils.version import LooseVersion
if LooseVersion(pyaudio.__version__) < LooseVersion("0.2.9"):
raise AttributeError("PyAudio 0.2.9 or later is required (found version {0})".format(pyaudio.__version__))
return pyaudio | ImportError | dataset/ETHPy150Open Uberi/speech_recognition/speech_recognition/__init__.py/Microphone.get_pyaudio |
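The same version-gating idea, generalized into a small helper; a minimal sketch using distutils' LooseVersion as above:

from distutils.version import LooseVersion

def require_version(module, minimum):
    # Raise if module.__version__ is older than the minimum we support.
    if LooseVersion(module.__version__) < LooseVersion(minimum):
        raise AttributeError(
            "%s %s or later is required (found version %s)"
            % (module.__name__, minimum, module.__version__))

# e.g. require_version(pyaudio, "0.2.9")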
def recognize_sphinx(self, audio_data, language = "en-US", show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using CMU Sphinx.
The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` or ``"en-GB"``, defaulting to US English. Out of the box, only ``en-US`` is supported. See `Notes on using PocketSphinx <https://github.com/Uberi/speech_recognition/blob/master/reference/pocketsphinx.rst>`__ for information about installing other languages. This document is also included under ``reference/pocketsphinx.rst``.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object resulting from the recognition.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if there are any issues with the Sphinx installation.
"""
assert isinstance(audio_data, AudioData), "`audio_data` must be audio data"
assert isinstance(language, str), "`language` must be a string"
# import the PocketSphinx speech recognition module
try:
from pocketsphinx import pocketsphinx
from sphinxbase import sphinxbase
except __HOLE__:
raise RequestError("missing PocketSphinx module: ensure that PocketSphinx is set up correctly.")
except ValueError:
raise RequestError("bad PocketSphinx installation detected; make sure you have PocketSphinx version 0.0.9 or better.")
language_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pocketsphinx-data", language)
if not os.path.isdir(language_directory):
raise RequestError("missing PocketSphinx language data directory: \"{0}\"".format(language_directory))
acoustic_parameters_directory = os.path.join(language_directory, "acoustic-model")
if not os.path.isdir(acoustic_parameters_directory):
raise RequestError("missing PocketSphinx language model parameters directory: \"{0}\"".format(acoustic_parameters_directory))
language_model_file = os.path.join(language_directory, "language-model.lm.bin")
if not os.path.isfile(language_model_file):
raise RequestError("missing PocketSphinx language model file: \"{0}\"".format(language_model_file))
phoneme_dictionary_file = os.path.join(language_directory, "pronounciation-dictionary.dict")
if not os.path.isfile(phoneme_dictionary_file):
raise RequestError("missing PocketSphinx phoneme dictionary file: \"{0}\"".format(phoneme_dictionary_file))
# create decoder object
config = pocketsphinx.Decoder.default_config()
config.set_string("-hmm", acoustic_parameters_directory) # set the path of the hidden Markov model (HMM) parameter files
config.set_string("-lm", language_model_file)
config.set_string("-dict", phoneme_dictionary_file)
config.set_string("-logfn", os.devnull) # disable logging (logging causes unwanted output in terminal)
decoder = pocketsphinx.Decoder(config)
# obtain audio data
raw_data = audio_data.get_raw_data(convert_rate = 16000, convert_width = 2) # the included language models require audio to be 16-bit mono 16 kHz in little-endian format
# obtain recognition results
decoder.start_utt() # begin utterance processing
decoder.process_raw(raw_data, False, True) # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True)
decoder.end_utt() # stop utterance processing
if show_all: return decoder
# return results
hypothesis = decoder.hyp()
if hypothesis is not None: return hypothesis.hypstr
raise UnknownValueError() # no transcriptions available | ImportError | dataset/ETHPy150Open Uberi/speech_recognition/speech_recognition/__init__.py/Recognizer.recognize_sphinx |
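Typical use of the recognizer above against a WAV file, following the library's documented pattern (the file name is illustrative):

import speech_recognition as sr

r = sr.Recognizer()
with sr.AudioFile('sample.wav') as source:  # illustrative file name
    audio = r.record(source)                # read the entire file
try:
    print(r.recognize_sphinx(audio))
except sr.UnknownValueError:
    print('Sphinx could not understand the audio')
except sr.RequestError as err:
    print('Sphinx error; check the installation: %s' % err)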
def recognize_google(self, audio_data, key = None, language = "en-US", show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API.
The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**.
To obtain your own API key, simply following the steps on the `API Keys <http://www.chromium.org/developers/how-tos/api-keys>`__ page at the Chromium Developers site. In the Google Developers Console, Google Speech Recognition is listed as "Speech API".
The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in this `StackOverflow answer <http://stackoverflow.com/a/14302134>`__.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "`audio_data` must be audio data"
assert key is None or isinstance(key, str), "`key` must be `None` or a string"
assert isinstance(language, str), "`language` must be a string"
flac_data = audio_data.get_flac_data(
convert_rate = None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz
convert_width = None if audio_data.sample_width >= 2 else 2 # audio samples must be at least 16-bit
)
if key is None: key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"
url = "http://www.google.com/speech-api/v2/recognize?{0}".format(urlencode({
"client": "chromium",
"lang": language,
"key": key,
}))
request = Request(url, data = flac_data, headers = {"Content-Type": "audio/x-flac; rate={0}".format(audio_data.sample_rate)})
# obtain audio transcription results
try:
response = urlopen(request)
except __HOLE__ as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
# ignore any blank blocks
actual_result = []
for line in response_text.split("\n"):
if not line: continue
result = json.loads(line)["result"]
if len(result) != 0:
actual_result = result[0]
break
# return results
if show_all: return actual_result
if "alternative" not in actual_result: raise UnknownValueError()
for entry in actual_result["alternative"]:
if "transcript" in entry:
return entry["transcript"]
raise UnknownValueError() # no transcriptions available | HTTPError | dataset/ETHPy150Open Uberi/speech_recognition/speech_recognition/__init__.py/Recognizer.recognize_google |
def recognize_wit(self, audio_data, key, show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Wit.ai API.
The Wit.ai API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://wit.ai/>`__ and creating an app. You will need to add at least one intent to the app before you can see the API key, though the actual intent settings don't matter.
To get the API key for a Wit.ai app, go to the app's overview page, go to the section titled "Make an API request", and look for something along the lines of ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings.
The recognition language is configured in the Wit.ai app settings.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://wit.ai/docs/http/20141022#get-intent-via-text-link>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(key, str), "`key` must be a string"
wav_data = audio_data.get_wav_data(
convert_rate = None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz
convert_width = 2 # audio samples should be 16-bit
)
url = "https://api.wit.ai/speech?v=20141022"
request = Request(url, data = wav_data, headers = {"Authorization": "Bearer {0}".format(key), "Content-Type": "audio/wav"})
try:
response = urlopen(request)
except __HOLE__ as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "_text" not in result or result["_text"] is None: raise UnknownValueError()
return result["_text"] | HTTPError | dataset/ETHPy150Open Uberi/speech_recognition/speech_recognition/__init__.py/Recognizer.recognize_wit |
def recognize_bing(self, audio_data, key, language = "en-US", show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Bing Voice Recognition API.
The Microsoft Bing Voice Recognition API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://www.microsoft.com/cognitive-services/en-us/speech-api>`__ with Microsoft Cognitive Services.
To get the API key, go to the `Microsoft Cognitive Services subscriptions overview <https://www.microsoft.com/cognitive-services/en-us/subscriptions>`__, go to the entry titled "Speech", and look for the key under the "Keys" column. Microsoft Bing Voice Recognition API keys are 32-character lowercase hexadecimal strings.
The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation <https://www.microsoft.com/cognitive-services/en-us/speech-api/documentation/api-reference-rest/BingVoiceRecognition#user-content-4-supported-locales>`__.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://www.microsoft.com/cognitive-services/en-us/speech-api/documentation/api-reference-rest/BingVoiceRecognition#user-content-3-voice-recognition-responses>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(key, str), "`key` must be a string"
assert isinstance(language, str), "`language` must be a string"
access_token, expire_time = getattr(self, "bing_cached_access_token", None), getattr(self, "bing_cached_access_token_expiry", None)
allow_caching = True
try:
from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+
except __HOLE__:
try:
from monotonic import monotonic # use time.monotonic backport for Python 2 if available (from https://pypi.python.org/pypi/monotonic)
except (ImportError, RuntimeError):
expire_time = None # monotonic time not available, don't cache access tokens
allow_caching = False # don't allow caching, since monotonic time isn't available
if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired
# get an access token using OAuth
credential_url = "https://oxford-speech.cloudapp.net/token/issueToken"
credential_request = Request(credential_url, data = urlencode({
"grant_type": "client_credentials",
"client_id": "python",
"client_secret": key,
"scope": "https://speech.platform.bing.com"
}).encode("utf-8"))
if allow_caching:
start_time = monotonic()
try:
credential_response = urlopen(credential_request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
credential_text = credential_response.read().decode("utf-8")
credentials = json.loads(credential_text)
access_token, expiry_seconds = credentials["access_token"], float(credentials["expires_in"])
if allow_caching:
# save the token for the duration it is valid for
self.bing_cached_access_token = access_token
self.bing_cached_access_token_expiry = start_time + expiry_seconds
wav_data = audio_data.get_wav_data(
convert_rate = 16000, # audio samples must be 8kHz or 16 kHz
convert_width = 2 # audio samples should be 16-bit
)
url = "https://speech.platform.bing.com/recognize/query?{0}".format(urlencode({
"version": "3.0",
"requestid": uuid.uuid4(),
"appID": "D4D52672-91D7-4C74-8AD8-42B1D98141A5",
"format": "json",
"locale": language,
"device.os": "wp7",
"scenarios": "ulm",
"instanceid": uuid.uuid4(),
"result.profanitymarkup": "0",
}))
request = Request(url, data = wav_data, headers = {
"Authorization": "Bearer {0}".format(access_token),
"Content-Type": "audio/wav; samplerate=16000; sourcerate={0}; trustsourcerate=true".format(audio_data.sample_rate),
})
try:
response = urlopen(request)
except HTTPError as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "header" not in result or "lexical" not in result["header"]: raise UnknownValueError()
return result["header"]["lexical"] | ImportError | dataset/ETHPy150Open Uberi/speech_recognition/speech_recognition/__init__.py/Recognizer.recognize_bing |
def recognize_api(self, audio_data, client_access_token, language = "en", show_all = False):
"""
Perform speech recognition on ``audio_data`` (an ``AudioData`` instance), using the api.ai Speech to Text API.
The api.ai API client access token is specified by ``client_access_token``. Unfortunately, this is not available without `signing up for an account <https://console.api.ai/api-client/#/signup>`__ and creating an api.ai agent. To get the API client access token, go to the agent settings, go to the section titled "API keys", and look for "Client access token". API client access tokens are 32-character lowercase hexadecimal strings.
Although the recognition language is specified when creating the api.ai agent in the web console, it must also be provided in the ``language`` parameter as an RFC5646 language tag like ``"en"`` (US English) or ``"fr"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation <https://api.ai/docs/reference/#languages>`__.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://api.ai/docs/reference/#a-namepost-multipost-query-multipart>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(client_access_token, str), "`username` must be a string"
assert isinstance(language, str), "`language` must be a string"
wav_data = audio_data.get_wav_data(convert_rate = 16000, convert_width = 2) # audio must be 16-bit mono 16 kHz
url = "https://api.api.ai/v1/query"
# pick a good multipart boundary; one that is guaranteed not to be in the text
while True:
boundary = "{0:>016x}".format(random.randrange(0x10000000000000000)) # generate a random boundary
if boundary.encode("utf-8") not in wav_data:
break
data = (
b"--" + boundary.encode("utf-8") + b"\r\n" +
b"Content-Disposition: form-data; name=\"request\"\r\n" +
b"Content-Type: application/json\r\n" +
b"\r\n" +
b"{\"v\": \"20150910\", \"lang\": \"" + language.encode("utf-8") + b"\"}\r\n" +
b"--" + boundary.encode("utf-8") + b"\r\n" +
b"Content-Disposition: form-data; name=\"voiceData\"; filename=\"audio.wav\"\r\n" +
b"Content-Type: audio/wav\r\n" +
b"\r\n" +
wav_data + b"\r\n" +
b"--" + boundary.encode("utf-8") + b"--\r\n"
)
request = Request(url, data = data, headers = {
"Authorization": "Bearer {0}".format(client_access_token),
"Content-Length": str(len(data)),
"Expect": "100-continue",
"Content-Type": "multipart/form-data; boundary={0}".format(boundary)
})
try:
response = urlopen(request)
except __HOLE__ as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "asr" not in result or result["asr"] is None:
raise UnknownValueError()
return result["result"]["resolvedQuery"] | HTTPError | dataset/ETHPy150Open Uberi/speech_recognition/speech_recognition/__init__.py/Recognizer.recognize_api |
def recognize_ibm(self, audio_data, username, password, language = "en-US", show_all = False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the IBM Speech to Text API.
The IBM Speech to Text username and password are specified by ``username`` and ``password``, respectively. Unfortunately, these are not available without `signing up for an account <https://console.ng.bluemix.net/registration/>`__. Once logged into the Bluemix console, follow the instructions for `creating an IBM Watson service instance <http://www.ibm.com/smarterplanet/us/en/ibmwatson/developercloud/doc/getting_started/gs-credentials.shtml>`__, where the Watson service is "Speech To Text". IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, while passwords are mixed-case alphanumeric strings.
The recognition language is determined by ``language``, an RFC5646 language tag with a dialect like ``"en-US"`` (US English) or ``"zh-CN"`` (Mandarin Chinese), defaulting to US English. The supported language values are listed under the ``model`` parameter of the `audio recognition API documentation <http://www.ibm.com/smarterplanet/us/en/ibmwatson/developercloud/speech-to-text/api/v1/#recognize_audio_sessionless12>`__, in the form ``LANGUAGE_BroadbandModel``, where ``LANGUAGE`` is the language value.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <http://www.ibm.com/smarterplanet/us/en/ibmwatson/developercloud/speech-to-text/api/v1/#recognize_audio_sessionless12>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(username, str), "`username` must be a string"
assert isinstance(password, str), "`password` must be a string"
flac_data = audio_data.get_flac_data(
convert_rate = None if audio_data.sample_rate >= 16000 else 16000, # audio samples should be at least 16 kHz
convert_width = None if audio_data.sample_width >= 2 else 2 # audio samples should be at least 16-bit
)
model = "{0}_BroadbandModel".format(language)
url = "https://stream.watsonplatform.net/speech-to-text/api/v1/recognize?{0}".format(urlencode({
"profanity_filter": "false",
"continuous": "true",
"model": model,
}))
request = Request(url, data = flac_data, headers = {"Content-Type": "audio/x-flac"})
if hasattr("", "encode"):
authorization_value = base64.standard_b64encode("{0}:{1}".format(username, password).encode("utf-8")).decode("utf-8")
else:
authorization_value = base64.standard_b64encode("{0}:{1}".format(username, password))
request.add_header("Authorization", "Basic {0}".format(authorization_value))
try:
response = urlopen(request)
except __HOLE__ as e:
raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code)))) # use getattr to be compatible with Python 2.6
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "results" not in result or len(result["results"]) < 1 or "alternatives" not in result["results"][0]:
raise UnknownValueError()
transcription = []
for utterance in result["results"]:
if "alternatives" not in utterance: raise UnknownValueError()
for hypothesis in utterance["alternatives"]:
if "transcript" in hypothesis:
transcription.append(hypothesis["transcript"])
return "\n".join(transcription) | HTTPError | dataset/ETHPy150Open Uberi/speech_recognition/speech_recognition/__init__.py/Recognizer.recognize_ibm |
def get_flac_converter():
# determine which converter executable to use
system = platform.system()
path = os.path.dirname(os.path.abspath(__file__)) # directory of the current module file, where all the FLAC bundled binaries are stored
flac_converter = shutil_which("flac") # check for installed version first
if flac_converter is None: # flac utility is not installed
compatible_machine_types = ["i386", "i486", "i586", "i686", "i786", "x86", "x86_64", "AMD64"] # whitelist of machine types our bundled binaries are compatible with
if system == "Windows" and platform.machine() in compatible_machine_types:
flac_converter = os.path.join(path, "flac-win32.exe")
elif system == "Linux" and platform.machine() in compatible_machine_types:
flac_converter = os.path.join(path, "flac-linux-x86")
elif system == "Darwin" and platform.machine() in compatible_machine_types:
flac_converter = os.path.join(path, "flac-mac")
else:
raise OSError("FLAC conversion utility not available - consider installing the FLAC command line application using `brew install flac` or your operating system's equivalent")
# mark FLAC converter as executable if possible
try:
stat_info = os.stat(flac_converter)
os.chmod(flac_converter, stat_info.st_mode | stat.S_IEXEC)
except __HOLE__: pass
return flac_converter | OSError | dataset/ETHPy150Open Uberi/speech_recognition/speech_recognition/__init__.py/get_flac_converter |
def recognize_att(self, audio_data, app_key, app_secret, language = "en-US", show_all = False):
authorization_url = "https://api.att.com/oauth/v4/token"
authorization_body = "client_id={0}&client_secret={1}&grant_type=client_credentials&scope=SPEECH".format(app_key, app_secret)
try: authorization_response = urlopen(authorization_url, data = authorization_body.encode("utf-8"))
except __HOLE__ as e: raise RequestError("credential request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code))))
except URLError as e: raise RequestError("credential connection failed: {0}".format(e.reason))
authorization_text = authorization_response.read().decode("utf-8")
authorization_bearer = json.loads(authorization_text).get("access_token")
if authorization_bearer is None: raise RequestError("missing OAuth access token in requested credentials")
wav_data = audio_data.get_wav_data(convert_rate = 8000 if audio_data.sample_rate < 16000 else 16000, convert_width = 2)
request = Request("https://api.att.com/speech/v3/speechToText", data = wav_data, headers = {"Authorization": "Bearer {0}".format(authorization_bearer), "Content-Language": language, "Content-Type": "audio/wav"})
try: response = urlopen(request)
except HTTPError as e: raise RequestError("recognition request failed: {0}".format(getattr(e, "reason", "status {0}".format(e.code))))
except URLError as e: raise RequestError("recognition connection failed: {0}".format(e.reason))
result = json.loads(response.read().decode("utf-8"))
if show_all: return result
if "Recognition" not in result or "NBest" not in result["Recognition"]: raise UnknownValueError()
for entry in result["Recognition"]["NBest"]:
if entry.get("Grade") == "accept" and "ResultText" in entry: return entry["ResultText"]
raise UnknownValueError() # no transcriptions available | HTTPError | dataset/ETHPy150Open Uberi/speech_recognition/speech_recognition/__init__.py/recognize_att |
def test_ovs_restart(self):
self._setup_for_dvr_test()
reset_methods = (
'reset_ovs_parameters', 'reset_dvr_parameters',
'setup_dvr_flows_on_integ_br', 'setup_dvr_flows_on_tun_br',
'setup_dvr_flows_on_phys_br', 'setup_dvr_mac_flows_on_all_brs')
reset_mocks = [mock.patch.object(self.agent.dvr_agent, method).start()
for method in reset_methods]
tun_br = mock.create_autospec(self.agent.tun_br)
with mock.patch.object(self.agent,
'check_ovs_status',
return_value=constants.OVS_RESTARTED),\
mock.patch.object(self.agent,
'_agent_has_updates',
side_effect=TypeError('loop exit')),\
mock.patch.object(self.agent, 'tun_br', new=tun_br),\
mock.patch.object(self.agent, 'setup_physical_bridges'),\
mock.patch.object(self.agent, 'setup_integration_br'),\
mock.patch.object(self.agent, 'setup_tunnel_br'),\
mock.patch.object(self.agent, 'state_rpc'):
try:
self.agent.rpc_loop(polling_manager=mock.Mock())
except __HOLE__:
pass
self.assertTrue(all([x.called for x in reset_mocks])) | TypeError | dataset/ETHPy150Open openstack/neutron/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py/TestOvsDvrNeutronAgent.test_ovs_restart |
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except __HOLE__:
self[key] = value = self.creator(key)
return value | KeyError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/PopulateDict.__getitem__ |
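Usage sketch for PopulateDict above, assuming (as in the full source) that its __init__ stores the creator callable:

squares = PopulateDict(lambda key: key * key)
print(squares[4])  # 16, computed by the creator and stored
print(squares[4])  # 16, now served straight from the dict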
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except __HOLE__:
return self.__missing__(key) | KeyError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/defaultdict.__getitem__ |
def __call__(self, *args):
hashkey = (self, args)
try:
return ArgSingleton.instances[hashkey]
except __HOLE__:
instance = type.__call__(self, *args)
ArgSingleton.instances[hashkey] = instance
return instance | KeyError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/ArgSingleton.__call__ |
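ArgSingleton above is a metaclass that caches instances by constructor arguments, so the same args yield the same object. A Python 2 usage sketch, matching the codebase's metaclass syntax:

class Point(object):
    __metaclass__ = ArgSingleton

    def __init__(self, x, y):
        self.x, self.y = x, y

assert Point(1, 2) is Point(1, 2)      # same args: cached instance
assert Point(1, 2) is not Point(3, 4)  # different args: new instance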
def format_argspec_init(method, grouped=True):
"""format_argspec_plus with considerations for typical __init__ methods
Wraps format_argspec_plus with error handling strategies for typical
__init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
try:
return format_argspec_plus(method, grouped=grouped)
except __HOLE__:
self_arg = 'self'
if method is object.__init__:
args = grouped and '(self)' or 'self'
else:
args = (grouped and '(self, *args, **kwargs)'
or 'self, *args, **kwargs')
return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args) | TypeError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/format_argspec_init |
def getargspec_init(method):
"""inspect.getargspec with considerations for typical __init__ methods
Wraps inspect.getargspec with error handling for typical __init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
try:
return inspect.getargspec(method)
except __HOLE__:
if method is object.__init__:
return (['self'], None, None, None)
else:
return (['self'], 'args', 'kwargs', None) | TypeError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/getargspec_init |
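A quick check of the fallback in getargspec_init above: object.__init__ is a C slot wrapper, so inspect.getargspec raises TypeError and the hand-built spec is returned:

print(getargspec_init(object.__init__))
# (['self'], None, None, None)

class Widget(object):
    def __init__(self, name, size=1):
        pass

print(getargspec_init(Widget.__init__))
# ArgSpec(args=['self', 'name', 'size'], varargs=None, keywords=None, defaults=(1,))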
def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
name='self.proxy', from_instance=None):
"""Automates delegation of __specials__ for a proxying type."""
if only:
dunders = only
else:
if skip is None:
skip = ('__slots__', '__del__', '__getattribute__',
'__metaclass__', '__getstate__', '__setstate__')
dunders = [m for m in dir(from_cls)
if (m.startswith('__') and m.endswith('__') and
not hasattr(into_cls, m) and m not in skip)]
for method in dunders:
try:
spec = inspect.getargspec(getattr(from_cls, method))
fn_args = inspect.formatargspec(spec[0])
d_args = inspect.formatargspec(spec[0][1:])
except __HOLE__:
fn_args = '(self, *args, **kw)'
d_args = '(*args, **kw)'
py = ("def %(method)s%(fn_args)s: "
"return %(name)s.%(method)s%(d_args)s" % locals())
env = from_instance is not None and {name: from_instance} or {}
exec py in env
setattr(into_cls, method, env[method]) | TypeError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/monkeypatch_proxied_specials |
def __getattr__(self, key):
try:
return self._data[key]
except __HOLE__:
raise AttributeError(key) | KeyError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/OrderedProperties.__getattr__ |
def __delattr__(self, key):
try:
del self._tdict[(thread.get_ident(), key)]
except __HOLE__:
raise AttributeError(key) | KeyError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/ThreadLocal.__delattr__ |
def __getattr__(self, key):
try:
return self._tdict[(thread.get_ident(), key)]
except __HOLE__:
raise AttributeError(key) | KeyError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/ThreadLocal.__getattr__ |
def discard(self, value):
try:
self.remove(value)
except __HOLE__:
pass | KeyError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/IdentitySet.discard |
def pop(self):
try:
pair = self._members.popitem()
return pair[1]
except __HOLE__:
raise KeyError('pop from an empty set') | KeyError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/IdentitySet.pop |
def __call__(self):
key = self._get_key()
try:
return self.registry[key]
except __HOLE__:
return self.registry.setdefault(key, self.createfunc()) | KeyError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/ScopedRegistry.__call__ |
def clear(self):
try:
del self.registry[self._get_key()]
except __HOLE__:
pass | KeyError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/ScopedRegistry.clear |
def function_named(fn, name):
"""Return a function with a given __name__.
Will assign to __name__ and return the original function if possible on
the Python implementation, otherwise a new function will be constructed.
"""
try:
fn.__name__ = name
except __HOLE__:
fn = new.function(fn.func_code, fn.func_globals, name,
fn.func_defaults, fn.func_closure)
return fn | TypeError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/function_named |
def cache_decorator(func):
    """apply caching to the return value of a function."""
    name = '_cached_' + func.__name__
    def do_with_cache(self, *args, **kwargs):
        try:
            return getattr(self, name)
        except __HOLE__:
            value = func(self, *args, **kwargs)
            setattr(self, name, value)
            return value
    return do_with_cache | AttributeError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/cache_decorator
def reset_cached(instance, name):
    try:
        delattr(instance, '_cached_' + name)
    except __HOLE__:
        pass | AttributeError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/reset_cached
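The pair works as a per-instance memoizer: cache_decorator stores the first result under a '_cached_<name>' attribute, and reset_cached invalidates it by deleting that attribute. A minimal sketch with a hypothetical Report class:

class Report(object):
    @cache_decorator
    def totals(self):
        print 'computing...'   # printed only on a cache miss
        return 42

r = Report()
r.totals()                 # computes, stores r._cached_totals
r.totals()                 # returned straight from the cached attribute
reset_cached(r, 'totals')
r.totals()                 # recomputes after invalidation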
def _cleanup(self, wr, key=None):
    if key is None:
        key = wr.key
    try:
        del self._weakrefs[key]
    except (KeyError, AttributeError):  # pragma: no cover
        pass  # pragma: no cover
    try:
        del self.by_id[key]
    except (__HOLE__, AttributeError):  # pragma: no cover
        pass  # pragma: no cover | KeyError | dataset/ETHPy150Open ralfonso/theory/theory/model/mpdutil.py/WeakIdentityMapping._cleanup
def main(self, args, initial_options):
    options, args = self.parser.parse_args(args)
    self.merge_options(initial_options, options)
    level = 1  # Notify
    level += options.verbose
    level -= options.quiet
    level = logger.level_for_integer(4 - level)
    complete_log = []
    logger.consumers.extend(
        [(level, sys.stdout),
         (logger.DEBUG, complete_log.append)])
    if options.log_explicit_levels:
        logger.explicit_levels = True
    self.setup_logging()
    # TODO: try to get these passing down from the command?
    #       without resorting to os.environ to hold these.
    if options.no_input:
        os.environ['PIP_NO_INPUT'] = '1'
    if options.exists_action:
        os.environ['PIP_EXISTS_ACTION'] = ''.join(options.exists_action)
    if not ssl and options.insecure:
        os.environ['PIP_INSECURE'] = '1'
    if options.cert:
        os.environ['PIP_CERT'] = options.cert
    if options.require_venv:
        # If a venv is required check if it can really be found
        if not os.environ.get('VIRTUAL_ENV'):
            logger.fatal('Could not find an activated virtualenv (required).')
            sys.exit(VIRTUALENV_NOT_FOUND)
    if options.log:
        log_fp = open_logfile(options.log, 'a')
        logger.consumers.append((logger.DEBUG, log_fp))
    else:
        log_fp = None
    socket.setdefaulttimeout(options.timeout or None)
    urlopen.setup(proxystr=options.proxy, prompting=not options.no_input)
    exit = SUCCESS
    store_log = False
    try:
        status = self.run(options, args)
        # FIXME: all commands should return an exit status
        # and when it is done, isinstance is not needed anymore
        if isinstance(status, int):
            exit = status
    except (InstallationError, UninstallationError):
        e = sys.exc_info()[1]
        logger.fatal(str(e))
        logger.info('Exception information:\n%s' % format_exc())
        store_log = True
        exit = ERROR
    except BadCommand:
        e = sys.exc_info()[1]
        logger.fatal(str(e))
        logger.info('Exception information:\n%s' % format_exc())
        store_log = True
        exit = ERROR
    except CommandError:
        e = sys.exc_info()[1]
        logger.fatal('ERROR: %s' % e)
        logger.info('Exception information:\n%s' % format_exc())
        exit = ERROR
    except __HOLE__:
        logger.fatal('Operation cancelled by user')
        logger.info('Exception information:\n%s' % format_exc())
        store_log = True
        exit = ERROR
    except:
        logger.fatal('Exception:\n%s' % format_exc())
        store_log = True
        exit = UNKNOWN_ERROR
    if log_fp is not None:
        log_fp.close()
    if store_log:
        log_fn = options.log_file
        text = '\n'.join(complete_log)
        try:
            log_fp = open_logfile(log_fn, 'w')
        except IOError:
            temp = tempfile.NamedTemporaryFile(delete=False)
            log_fn = temp.name
            log_fp = open_logfile(log_fn, 'w')
        logger.fatal('Storing complete log in %s' % log_fn)
        log_fp.write(text)
        log_fp.close()
    return exit | KeyboardInterrupt | dataset/ETHPy150Open balanced/status.balancedpayments.com/venv/lib/python2.7/site-packages/pip-1.3.1-py2.7.egg/pip/basecommand.py/Command.main
def parse_markdown_readme():
    """
    Convert README.md to RST via pandoc, and load into memory
    (fallback to LONG_DESCRIPTION on failure)
    """
    # Attempt to run pandoc on markdown file
    import subprocess
    try:
        subprocess.call(
            ['pandoc', '-t', 'rst', '-o', 'README.rst', 'README.md']
        )
    except __HOLE__:
        return LONG_DESCRIPTION
    # Attempt to load output
    try:
        readme = open(os.path.join(
            os.path.dirname(__file__),
            'README.rst'
        ))
    except IOError:
        return LONG_DESCRIPTION
    return readme.read() | OSError | dataset/ETHPy150Open wq/wq.db/setup.py/parse_markdown_readme
def _positive_non_zero_int(argument_value):
    if argument_value is None:
        return None
    try:
        value = int(argument_value)
    except __HOLE__:
        msg = "%s must be an integer" % argument_value
        raise argparse.ArgumentTypeError(msg)
    if value <= 0:
        msg = "%s must be greater than 0" % argument_value
        raise argparse.ArgumentTypeError(msg)
    return value | ValueError | dataset/ETHPy150Open openstack/python-cloudkittyclient/cloudkittyclient/shell.py/_positive_non_zero_int
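The validator is designed for argparse's type= hook, where raising ArgumentTypeError is rendered as a clean usage error instead of a traceback. A minimal sketch (the --repeat option name is illustrative):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--repeat', type=_positive_non_zero_int, default=1)
print(parser.parse_args(['--repeat', '3']).repeat)   # 3
# '--repeat 0' or '--repeat x' would exit with the "must be ..." usage errors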
def main(args=None):
    try:
        if args is None:
            args = sys.argv[1:]
        CloudkittyShell().main(args)
    except Exception as e:
        if '--debug' in args or '-d' in args:
            raise
        else:
            print(encodeutils.safe_encode(six.text_type(e)), file=sys.stderr)
            sys.exit(1)
    except __HOLE__:
        print("Stopping Cloudkitty Client", file=sys.stderr)
        sys.exit(130) | KeyboardInterrupt | dataset/ETHPy150Open openstack/python-cloudkittyclient/cloudkittyclient/shell.py/main
def get_thumbnail_name(self, thumbnail_options, transparent=False,
                       high_resolution=False):
    """
    A version of ``Thumbnailer.get_thumbnail_name`` that produces a
    reproducible thumbnail name that can be converted back to the original
    filename.
    """
    path, source_filename = os.path.split(self.name)
    source_extension = os.path.splitext(source_filename)[1][1:]
    if self.thumbnail_preserve_extensions is True or \
            (self.thumbnail_preserve_extensions and
             source_extension.lower() in self.thumbnail_preserve_extensions):
        extension = source_extension
    elif transparent:
        extension = self.thumbnail_transparency_extension
    else:
        extension = self.thumbnail_extension
    extension = extension or 'jpg'
    thumbnail_options = thumbnail_options.copy()
    size = tuple(thumbnail_options.pop('size'))
    quality = thumbnail_options.pop('quality', self.thumbnail_quality)
    initial_opts = ['%sx%s' % size, 'q%s' % quality]
    opts = list(thumbnail_options.items())
    opts.sort()  # Sort the options so the file name is consistent.
    opts = ['%s' % (v is not True and '%s-%s' % (k, v) or k)
            for k, v in opts if v]
    all_opts = '_'.join(initial_opts + opts)
    basedir = self.thumbnail_basedir
    subdir = self.thumbnail_subdir
    # make sure our magic delimiter is not used in all_opts
    all_opts = all_opts.replace('__', '_')
    if high_resolution:
        try:
            all_opts += self.thumbnail_highres_infix
        except __HOLE__:
            all_opts += '@2x'
    filename = '%s__%s.%s' % (source_filename, all_opts, extension)
    return os.path.join(basedir, path, subdir, filename) | AttributeError | dataset/ETHPy150Open divio/django-filer/filer/utils/filer_easy_thumbnails.py/ThumbnailerNameMixin.get_thumbnail_name
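The scheme is deliberately reversible: everything after the double-underscore delimiter is derived from the options, so splitting on '__' recovers the source filename. An illustrative trace with assumed values (not taken from a real configuration):

# source name: photos/cat.png, options: {'size': (100, 100), 'quality': 85, 'crop': True}
# initial_opts -> ['100x100', 'q85'];  the True-valued 'crop' option -> 'crop'
# all_opts     -> '100x100_q85_crop'  (any inner '__' collapsed to '_')
# filename     -> 'cat.png__100x100_q85_crop.png'
# 'cat.png__100x100_q85_crop.png'.split('__')[0] == 'cat.png'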
def extend(headers, line):
    try:
        header = headers.pop()
    except __HOLE__:
        # this means that we got invalid header
        # ignore it
        return
    if isinstance(header, deque):
        header.append(line)
        headers.append(header)
    else:
        headers.append(deque((header, line))) | IndexError | dataset/ETHPy150Open mailgun/flanker/flanker/mime/message/headers/parsing.py/extend
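This is the accumulation step of RFC 5322 header unfolding: a continuation line is attached to the most recent header, and a multi-line value grows a deque in place. A short sketch, assuming headers is the mutable list of lines parsed so far:

from collections import deque

headers = ['Subject: a very long']
extend(headers, ' subject, folded')    # headers == [deque(['Subject: a very long', ' subject, folded'])]
extend(headers, ' over three lines')   # appended to the same deque
extend([], ' orphan continuation')     # no prior header: silently ignored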
def get_datacube_root():
    """Return the directory containing the datacube python source files.

    This returns the value of the DATACUBE_ROOT environment variable
    if it is set, otherwise it returns the directory containing the
    source code for this function (cube_util.get_datacube_root).
    """
    try:
        datacube_root = os.environ['DATACUBE_ROOT']
    except __HOLE__:
        this_file = inspect.getsourcefile(get_datacube_root)
        datacube_root = os.path.dirname(os.path.abspath(this_file))
    return datacube_root | KeyError | dataset/ETHPy150Open GeoscienceAustralia/agdc/src/cube_util.py/get_datacube_root
def parse_date_from_string(date_string):
    """Attempt to parse a date from a command line or config file argument.

    This function tries a series of date formats, and returns a date
    object if one of them works, None otherwise.
    """
    format_list = ['%Y%m%d',
                   '%d/%m/%Y',
                   '%Y-%m-%d'
                   ]
    # Try the formats in the order listed.
    date = None
    for date_format in format_list:
        try:
            date = datetime.datetime.strptime(date_string, date_format).date()
            break
        except __HOLE__:
            pass
    return date | ValueError | dataset/ETHPy150Open GeoscienceAustralia/agdc/src/cube_util.py/parse_date_from_string
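Each format is tried in order and the first successful parse wins; unparseable input yields None rather than an exception, so callers can treat None as "not a date". For example:

print parse_date_from_string('20130115')     # 2013-01-15
print parse_date_from_string('15/01/2013')   # 2013-01-15
print parse_date_from_string('2013-01-15')   # 2013-01-15
print parse_date_from_string('Jan 15 2013')  # None: no format matches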
def create_directory(dirname):
    """Create dirname, including any intermediate directories necessary to
    create the leaf directory."""
    # Allow group permissions on the directory we are about to create
    old_umask = os.umask(0o007)
    try:
        os.makedirs(dirname)
    except __HOLE__, e:
        if e.errno != errno.EEXIST or not os.path.isdir(dirname):
            raise DatasetError('Directory %s could not be created' % dirname)
    finally:
        # Put back the old umask
        os.umask(old_umask) | OSError | dataset/ETHPy150Open GeoscienceAustralia/agdc/src/cube_util.py/create_directory
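The temporary umask is what grants the group permissions: makedirs asks for mode 0o777, and 0o777 & ~0o007 leaves 0o770 (owner and group rwx). The same idiom in isolation (the path is illustrative):

import os

old_umask = os.umask(0o007)
try:
    os.makedirs('/tmp/agdc_example')   # directories created with mode 0o770
finally:
    os.umask(old_umask)                # always restore the process-wide umask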
def clean(self, value):
    value = super(ITSocialSecurityNumberField, self).clean(value)
    if value in EMPTY_VALUES:
        return ''
    value = re.sub('\s', '', value).upper()
    # Entities SSN are numeric-only
    if value.isdigit():
        try:
            return vat_number_validation(value)
        except ValueError:
            raise ValidationError(self.error_messages['invalid'])
    # Person SSN
    else:
        try:
            return ssn_validation(value)
        except (__HOLE__, IndexError):
            raise ValidationError(self.error_messages['invalid']) | ValueError | dataset/ETHPy150Open django/django-localflavor/localflavor/it/forms.py/ITSocialSecurityNumberField.clean
def clean(self, value):
    value = super(ITVatNumberField, self).clean(value)
    if value in EMPTY_VALUES:
        return ''
    try:
        return vat_number_validation(value)
    except __HOLE__:
        raise ValidationError(self.error_messages['invalid']) | ValueError | dataset/ETHPy150Open django/django-localflavor/localflavor/it/forms.py/ITVatNumberField.clean
def _create_test_db(self, verbosity, autoclobber):
    settings_dict = self.connection.settings_dict
    if self.connection._DJANGO_VERSION >= 13:
        test_name = self._get_test_db_name()
    else:
        if settings_dict['TEST_NAME']:
            test_name = settings_dict['TEST_NAME']
        else:
            try:
                from django.db.backends.base.creation import TEST_DATABASE_PREFIX
            except __HOLE__:
                # import location prior to Django 1.8
                from django.db.backends.creation import TEST_DATABASE_PREFIX
            test_name = TEST_DATABASE_PREFIX + settings_dict['NAME']
    if self.connection._DJANGO_VERSION >= 17:
        settings_dict['TEST']['NAME'] = test_name
    else:
        if not settings_dict['TEST_NAME']:
            settings_dict['TEST_NAME'] = test_name
    if not self.connection.test_create:
        # use the existing database instead of creating a new one
        if verbosity >= 1:
            print("Dropping tables ... ")
        self.connection.close()
        settings_dict["NAME"] = test_name
        cursor = self.connection.cursor()
        qn = self.connection.ops.quote_name
        sql = "SELECT TABLE_NAME, CONSTRAINT_NAME " \
              "FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS " \
              "WHERE CONSTRAINT_TYPE = 'FOREIGN KEY'"
        for row in cursor.execute(sql).fetchall():
            objs = (qn(row[0]), qn(row[1]))
            cursor.execute("ALTER TABLE %s DROP CONSTRAINT %s" % objs)
        for table in self.connection.introspection.get_table_list(cursor):
            if verbosity >= 1:
                print("Dropping table %s" % table)
            cursor.execute('DROP TABLE %s' % qn(table))
        self.connection.connection.commit()
        return test_name
    if self.connection.ops.on_azure_sql_db:
        self.connection.close()
        settings_dict["NAME"] = 'master'
    return super(DatabaseCreation, self)._create_test_db(verbosity, autoclobber) | ImportError | dataset/ETHPy150Open lionheart/django-pyodbc/django_pyodbc/creation.py/DatabaseCreation._create_test_db