code | docstring
---|---
#vtb
def rbac_policy_update(request, policy_id, **kwargs):
    body = {'rbac_policy': kwargs}
    rbac_policy = neutronclient(request).update_rbac_policy(
        policy_id, body=body).get('rbac_policy')
return RBACPolicy(rbac_policy)
|
Update a RBAC Policy.
:param request: request context
:param policy_id: target policy id
:param target_tenant: target tenant of the policy
:return: RBACPolicy object
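A minimal usage sketch (not part of the original source); `request` stands for a Django request object and both IDs are placeholders:
# Share the RBAC policy with another project by changing its target tenant.
updated = rbac_policy_update(request, 'policy-id-placeholder',
                             target_tenant='other-project-id')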
|
#vtb
def query_hek(time, time_window=1):
hek_client = hek.HEKClient()
start_time = time - timedelta(hours=time_window)
end_time = time + timedelta(hours=time_window)
responses = hek_client.query(hek.attrs.Time(start_time, end_time))
return responses
|
Requests HEK responses for a given time.
:param time: datetime object
:param time_window: how far in hours on either side of the input time to look for results
:return: hek response list
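A short usage sketch, assuming the surrounding module imports `timedelta` and sunpy's `hek` client as the snippet references them:
from datetime import datetime

# HEK events within +/- 2 hours of noon on 2014-01-01.
events = query_hek(datetime(2014, 1, 1, 12, 0), time_window=2)
print(len(events))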
|
#vtb
def style_from_dict(style_dict, include_defaults=True):
assert isinstance(style_dict, Mapping)
if include_defaults:
s2 = {}
s2.update(DEFAULT_STYLE_EXTENSIONS)
s2.update(style_dict)
style_dict = s2
token_to_attrs = {}
for ttype, styledef in sorted(style_dict.items()):
attrs = DEFAULT_ATTRS
        if 'noinherit' not in styledef:
for i in range(1, len(ttype) + 1):
try:
attrs = token_to_attrs[ttype[:-i]]
except KeyError:
pass
else:
break
for part in styledef.split():
            if part == 'noinherit':
                pass
            elif part == 'bold':
                attrs = attrs._replace(bold=True)
            elif part == 'nobold':
                attrs = attrs._replace(bold=False)
            elif part == 'italic':
                attrs = attrs._replace(italic=True)
            elif part == 'noitalic':
                attrs = attrs._replace(italic=False)
            elif part == 'underline':
                attrs = attrs._replace(underline=True)
            elif part == 'nounderline':
                attrs = attrs._replace(underline=False)
            elif part == 'blink':
                attrs = attrs._replace(blink=True)
            elif part == 'noblink':
                attrs = attrs._replace(blink=False)
            elif part == 'reverse':
                attrs = attrs._replace(reverse=True)
            elif part == 'noreverse':
                attrs = attrs._replace(reverse=False)
            elif part in ('roman', 'sans', 'mono'):
                pass
            elif part.startswith('border:'):
                pass
            elif part.startswith('bg:'):
                attrs = attrs._replace(bgcolor=_colorformat(part[3:]))
            else:
                attrs = attrs._replace(color=_colorformat(part))
token_to_attrs[ttype] = attrs
return _StyleFromDict(token_to_attrs)
|
Create a ``Style`` instance from a dictionary or other mapping.
The dictionary is equivalent to the ``Style.styles`` dictionary from
pygments, with a few additions: it supports 'reverse' and 'blink'.
Usage::
style_from_dict({
Token: '#ff0000 bold underline',
Token.Title: 'blink',
Token.SomethingElse: 'reverse',
})
:param include_defaults: Include the defaults (built-in) styling for
selected text, etc...)
|
#vtb
def merge(self, target, source,
target_comment=None, source_comment=None):
return TicketMergeRequest(self).post(target, source,
target_comment=target_comment,
source_comment=source_comment)
|
Merge the ticket(s) or ticket ID(s) in source into the target ticket.
:param target: ticket id or object to merge tickets into
:param source: ticket id, object or list of tickets or ids to merge into target
:param source_comment: optional comment for the source ticket(s)
:param target_comment: optional comment for the target ticket
:return: a JobStatus object
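A hedged usage sketch; `tickets_api` stands for whatever object exposes this method (a ticket endpoint wrapper) and the IDs are placeholders:
job_status = tickets_api.merge(
    target=1001,
    source=[1002, 1003],
    target_comment='Merged duplicate reports into this ticket',
    source_comment='Closed as duplicate of #1001')
# Poll the returned JobStatus object to see when the merge completes.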
|
#vtb
def _recursive_round(self, value, precision):
    if hasattr(value, '__iter__'):
return tuple(self._recursive_round(v, precision) for v in value)
return round(value, precision)
|
Round all numbers within an array or nested arrays
value: number or nested array of numbers
precision: integer number of decimal places to keep
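For illustration (`geom` is a hypothetical instance owning this method):
geom._recursive_round((1.23456, (2.34567, 3.45678)), 2)
# -> (1.23, (2.35, 3.46))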
|
#vtb
def Lomb_Scargle(data, precision, min_period, max_period, period_jobs=1):
time, mags, *err = data.T
scaled_mags = (mags-mags.mean())/mags.std()
minf, maxf = 2*np.pi/max_period, 2*np.pi/min_period
freqs = np.arange(minf, maxf, precision)
pgram = lombscargle(time, scaled_mags, freqs)
return 2*np.pi/freqs[np.argmax(pgram)]
|
Returns the period of *data* according to the
`Lomb-Scargle periodogram <https://en.wikipedia.org/wiki/Least-squares_spectral_analysis#The_Lomb.E2.80.93Scargle_periodogram>`_.
**Parameters**
data : array-like, shape = [n_samples, 2] or [n_samples, 3]
Array containing columns *time*, *mag*, and (optional) *error*.
precision : number
Distance between contiguous frequencies in search-space.
min_period : number
Minimum period in search-space.
max_period : number
Maximum period in search-space.
period_jobs : int, optional
Number of simultaneous processes to use while searching. Only one
process will ever be used, but argument is included to conform to
*periodogram* standards of :func:`find_period` (default 1).
**Returns**
period : number
The period of *data*.
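A self-contained sketch with a synthetic sinusoid, assuming `np` and `lombscargle` are in scope exactly as the function expects:
import numpy as np
from scipy.signal import lombscargle

t = np.linspace(0, 10, 500)
mag = np.sin(2 * np.pi * t / 0.5)   # true period = 0.5
data = np.column_stack([t, mag])
period = Lomb_Scargle(data, precision=0.01, min_period=0.2, max_period=2.0)
# period should come out close to 0.5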
|
#vtb
def to_json(self):
result = super(FieldsResource, self).to_json()
    result['fields'] = self.fields_with_locales()
return result
|
Returns the JSON Representation of the resource.
|
#vtb
def _approxaA(self,R,vR,vT,z,vz,phi,interp=True,cindx=None):
if isinstance(R,(int,float,numpy.float32,numpy.float64)):
R= numpy.array([R])
vR= numpy.array([vR])
vT= numpy.array([vT])
z= numpy.array([z])
vz= numpy.array([vz])
phi= numpy.array([phi])
X= R*numpy.cos(phi)
Y= R*numpy.sin(phi)
Z= z
if cindx is None:
closestIndx= [self._find_closest_trackpoint(X[ii],Y[ii],Z[ii],
z[ii],vz[ii],phi[ii],
interp=interp,
xy=True,usev=False)
for ii in range(len(R))]
else:
closestIndx= cindx
out= numpy.empty((6,len(R)))
for ii in range(len(R)):
dxv= numpy.empty(6)
if interp:
dxv[0]= R[ii]-self._interpolatedObsTrack[closestIndx[ii],0]
dxv[1]= vR[ii]-self._interpolatedObsTrack[closestIndx[ii],1]
dxv[2]= vT[ii]-self._interpolatedObsTrack[closestIndx[ii],2]
dxv[3]= z[ii]-self._interpolatedObsTrack[closestIndx[ii],3]
dxv[4]= vz[ii]-self._interpolatedObsTrack[closestIndx[ii],4]
dxv[5]= phi[ii]-self._interpolatedObsTrack[closestIndx[ii],5]
jacIndx= self._find_closest_trackpoint(R[ii],vR[ii],vT[ii],
z[ii],vz[ii],phi[ii],
interp=False,
xy=False)
else:
dxv[0]= R[ii]-self._ObsTrack[closestIndx[ii],0]
dxv[1]= vR[ii]-self._ObsTrack[closestIndx[ii],1]
dxv[2]= vT[ii]-self._ObsTrack[closestIndx[ii],2]
dxv[3]= z[ii]-self._ObsTrack[closestIndx[ii],3]
dxv[4]= vz[ii]-self._ObsTrack[closestIndx[ii],4]
dxv[5]= phi[ii]-self._ObsTrack[closestIndx[ii],5]
jacIndx= closestIndx[ii]
dmJacIndx= (X[ii]-self._ObsTrackXY[jacIndx,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx,2])**2.
if jacIndx == 0:
jacIndx2= jacIndx+1
dmJacIndx2= (X[ii]-self._ObsTrackXY[jacIndx+1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx+1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx+1,2])**2.
elif jacIndx == self._nTrackChunks-1:
jacIndx2= jacIndx-1
dmJacIndx2= (X[ii]-self._ObsTrackXY[jacIndx-1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx-1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx-1,2])**2.
else:
dm1= (X[ii]-self._ObsTrackXY[jacIndx-1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx-1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx-1,2])**2.
dm2= (X[ii]-self._ObsTrackXY[jacIndx+1,0])**2.\
+(Y[ii]-self._ObsTrackXY[jacIndx+1,1])**2.\
+(Z[ii]-self._ObsTrackXY[jacIndx+1,2])**2.
if dm1 < dm2:
jacIndx2= jacIndx-1
dmJacIndx2= dm1
else:
jacIndx2= jacIndx+1
dmJacIndx2= dm2
ampJacIndx= numpy.sqrt(dmJacIndx)/(numpy.sqrt(dmJacIndx)\
+numpy.sqrt(dmJacIndx2))
if dxv[5] > numpy.pi:
dxv[5]-= 2.*numpy.pi
elif dxv[5] < -numpy.pi:
dxv[5]+= 2.*numpy.pi
out[:,ii]= numpy.dot((1.-ampJacIndx)*self._alljacsTrack[jacIndx,:,:]
+ampJacIndx*self._alljacsTrack[jacIndx2,:,:],
dxv)
if interp:
out[:,ii]+= self._interpolatedObsTrackAA[closestIndx[ii]]
else:
out[:,ii]+= self._ObsTrackAA[closestIndx[ii]]
return out
|
NAME:
_approxaA
PURPOSE:
return action-angle coordinates for a point based on the linear
approximation around the stream track
INPUT:
R,vR,vT,z,vz,phi - phase-space coordinates of the given point
interp= (True), if True, use the interpolated track
cindx= index of the closest point on the (interpolated) stream track if not given, determined from the dimensions given
OUTPUT:
(Or,Op,Oz,ar,ap,az)
HISTORY:
2013-12-03 - Written - Bovy (IAS)
2015-11-12 - Added weighted sum of two nearest Jacobians to help with smoothness - Bovy (UofT)
|
#vtb
def validate_regex(ctx, param, value):
if not value:
return None
try:
re.compile(value)
except re.error:
        raise click.BadParameter('{!r} is not a valid regular expression'.format(value))  # message wording approximate; original literal was stripped
return value
|
Validate that a provided regex compiles.
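A minimal sketch of wiring the callback into a click option (the option name is illustrative):
import click

@click.command()
@click.option('--pattern', callback=validate_regex)
def search(pattern):
    click.echo('using pattern: {}'.format(pattern))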
|
#vtb
def runs(self, path="", filters={}, order="-created_at", per_page=None):
username, project, run = self._parse_path(path)
if not self._runs.get(path):
self._runs[path + str(filters) + str(order)] = Runs(self.client, username, project,
filters=filters, order=order, per_page=per_page)
return self._runs[path + str(filters) + str(order)]
|
Return a set of runs from a project that match the filters provided.
You can filter by config.*, summary.*, state, username, createdAt, etc.
The filters use the same query language as MongoDB:
https://docs.mongodb.com/manual/reference/operator/query
Order can be created_at, heartbeat_at, config.*.value, or summary.*. By default
the order is descending; prepend the order with a + to make it ascending.
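A hedged usage sketch, assuming the public `wandb.Api` wrapper exposes this method; the entity/project path is a placeholder:
import wandb

api = wandb.Api()
finished = api.runs('my-entity/my-project',
                    filters={'state': 'finished'},
                    order='-created_at')
for run in finished:
    print(run.name)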
|
#vtb
def mergebam(args):
p = OptionParser(mergebam.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) not in (2, 3):
sys.exit(not p.print_help())
if len(args) == 2:
idir1, outdir = args
dir1 = [idir1] if idir1.endswith(".bam") else iglob(idir1, "*.bam")
logging.debug("Homozygous mode")
dir2 = [""] * len(dir1)
elif len(args) == 3:
idir1, idir2, outdir = args
dir1 = [idir1] if idir1.endswith(".bam") else iglob(idir1, "*.bam")
dir2 = [idir2] if idir2.endswith(".bam") else iglob(idir2, "*.bam")
assert len(dir2) == 1, "Second pile must contain a single bam"
dir2 = [idir2] * len(dir1)
assert len(dir1) == len(dir2), "Two piles must contain same number of bams"
cmd = "samtools merge {} {} {} && samtools index {}"
cmds = []
mkdir(outdir)
for a, b in zip(dir1, dir2):
ia = op.basename(a).split(".")[0]
ib = op.basename(b).split(".")[0] if b else ia
outfile = op.join(outdir, "{}_{}.bam".format(ia, ib))
cmds.append(cmd.format(outfile, a, b, outfile))
p = Parallel(cmds, cpus=opts.cpus)
p.run()
|
%prog mergebam dir1 homo_outdir
or
%prog mergebam dir1 dir2/20.bam het_outdir
Merge sets of BAMs to make diploid. Two modes:
- Homozygous mode: pair-up the bams in the two folders and merge
- Heterozygous mode: pair the bams in first folder with a particular bam
|
#vtb
def detect(self, stream, threshold, threshold_type, trig_int, plotvar,
daylong=False, parallel_process=True, xcorr_func=None,
concurrency=None, cores=None, ignore_length=False,
group_size=None, overlap="calculate", debug=0,
full_peaks=False, save_progress=False,
process_cores=None, **kwargs):
party = Party()
template_groups = []
for master in self.templates:
for group in template_groups:
if master in group:
break
else:
new_group = [master]
for slave in self.templates:
if master.same_processing(slave) and master != slave:
new_group.append(slave)
template_groups.append(new_group)
for group in template_groups:
if len(group) == 0:
template_groups.remove(group)
for group in template_groups:
group_party = _group_detect(
templates=group, stream=stream.copy(), threshold=threshold,
threshold_type=threshold_type, trig_int=trig_int,
plotvar=plotvar, group_size=group_size, pre_processed=False,
daylong=daylong, parallel_process=parallel_process,
xcorr_func=xcorr_func, concurrency=concurrency, cores=cores,
ignore_length=ignore_length, overlap=overlap, debug=debug,
full_peaks=full_peaks, process_cores=process_cores, **kwargs)
party += group_party
if save_progress:
party.write("eqcorrscan_temporary_party")
if len(party) > 0:
for family in party:
if family is not None:
family.detections = family._uniq().detections
return party
|
Detect using a Tribe of templates within a continuous stream.
:type stream: `obspy.core.stream.Stream`
:param stream: Continuous data to detect within using the Template.
:type threshold: float
:param threshold:
Threshold level, if using `threshold_type='MAD'` then this will be
the multiple of the median absolute deviation.
:type threshold_type: str
:param threshold_type:
The type of threshold to be used, can be MAD, absolute or
av_chan_corr. See Note on thresholding below.
:type trig_int: float
:param trig_int:
Minimum gap between detections in seconds. If multiple detections
occur within trig_int of one-another, the one with the highest
cross-correlation sum will be selected.
:type plotvar: bool
:param plotvar:
Turn plotting on or off, see warning about plotting below
:type daylong: bool
:param daylong:
Set to True to use the
:func:`eqcorrscan.utils.pre_processing.dayproc` routine, which
performs additional checks and is more efficient for day-long data
over other methods.
:type parallel_process: bool
:param parallel_process:
:type xcorr_func: str or callable
:param xcorr_func:
A str of a registered xcorr function or a callable for implementing
a custom xcorr function. For more information see:
:func:`eqcorrscan.utils.correlate.register_array_xcorr`
:type concurrency: str
:param concurrency:
The type of concurrency to apply to the xcorr function. Options are
'multithread', 'multiprocess', 'concurrent'. For more details see
:func:`eqcorrscan.utils.correlate.get_stream_xcorr`
:type cores: int
:param cores: Number of workers for processing and detection.
:type ignore_length: bool
:param ignore_length:
If using daylong=True, then dayproc will try to check that the data
are there for at least 80% of the day, if you don't want this check
(which will raise an error if too much data are missing) then set
ignore_length=True. This is not recommended!
:type group_size: int
:param group_size:
Maximum number of templates to run at once, use to reduce memory
consumption, if unset will use all templates.
:type overlap: float
:param overlap:
Either None, "calculate" or a float of number of seconds to
overlap detection streams by. This is to counter the effects of
the delay-and-stack in calculating cross-correlation sums. Setting
overlap = "calculate" will work out the appropriate overlap based
on the maximum lags within templates.
:type debug: int
:param debug:
Debug level from 0-5 where five is more output, for debug levels
4 and 5, detections will not be computed in parallel.
:type full_peaks: bool
:param full_peaks: See `eqcorrscan.utils.findpeak.find_peaks2_short`
:type save_progress: bool
:param save_progress:
Whether to save the resulting party at every data step or not.
Useful for long-running processes.
:type process_cores: int
:param process_cores:
Number of processes to use for pre-processing (if different to
`cores`).
:return:
:class:`eqcorrscan.core.match_filter.Party` of Families of
detections.
.. Note::
`stream` must not be pre-processed. If your data contain gaps
you should *NOT* fill those gaps before using this method.
The pre-process functions (called within) will fill the gaps
internally prior to processing, process the data, then re-fill
the gaps with zeros to ensure correlations are not incorrectly
calculated within gaps. If your data have gaps you should pass a
merged stream without the `fill_value` argument
(e.g.: `stream = stream.merge()`).
.. Note::
Detections are not corrected for `pre-pick`, the
detection.detect_time corresponds to the beginning of the earliest
template channel at detection.
.. warning::
Picks included in the output Party.get_catalog() will not be
corrected for pre-picks in the template.
.. note::
**Data overlap:**
Internally this routine shifts and trims the data according to the
offsets in the template (e.g. if trace 2 starts 2 seconds after
trace 1 in the template then the continuous data will be shifted
by 2 seconds to align peak correlations prior to summing).
Because of this, detections at the start and end of continuous
data streams **may be missed**. The maximum time-period that
might be missing detections is the maximum offset in the template.
To work around this, if you are conducting matched-filter
detections through long-duration continuous data, we suggest
using some overlap (a few seconds, on the order of the maximum
offset in the templates) in the continuous data. You will then
need to post-process the detections (which should be done anyway
to remove duplicates). See below note for how `overlap` argument
affects data internally if `stream` is longer than the processing
length.
.. Note::
If `stream` is longer than processing length, this routine will
ensure that data overlap between loops, which will lead to no
missed detections at data start-stop points (see above note).
This will result in end-time not being strictly
honoured, so detections may occur after the end-time set. This is
because data must be run in the correct process-length.
.. note::
**Thresholding:**
**MAD** threshold is calculated as the:
.. math::
threshold {\\times} (median(abs(cccsum)))
where :math:`cccsum` is the cross-correlation sum for a given
template.
**absolute** threshold is a true absolute threshold based on the
cccsum value.
**av_chan_corr** is based on the mean values of single-channel
cross-correlations assuming all data are present as required for
the template, e.g:
.. math::
av\_chan\_corr\_thresh=threshold \\times (cccsum /
len(template))
where :math:`template` is a single template from the input and the
length is the number of channels within this template.
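A sketch of a typical call, assuming `tribe` is the Tribe owning this method and `st` is an unprocessed obspy Stream covering the template channels:
party = tribe.detect(
    stream=st, threshold=8.0, threshold_type='MAD', trig_int=6.0,
    plotvar=False, daylong=True, cores=4)
print(len(party))   # number of Families returned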
|
#vtb
def find_executable(executable, path=None):
    if sys.platform != 'win32':
        return distutils.spawn.find_executable(executable, path)
    if path is None:
        path = os.environ['PATH']
    paths = path.split(os.pathsep)
    extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
base, ext = os.path.splitext(executable)
if not os.path.isfile(executable):
for p in paths:
for ext in extensions:
f = os.path.join(p, base + ext)
if os.path.isfile(f):
return f
return None
else:
return executable
|
As distutils.spawn.find_executable, but on Windows, look up
every extension declared in PATHEXT instead of just `.exe`
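Usage is the same on every platform; only the Windows lookup differs:
git = find_executable('git')
if git is None:
    print('git is not on PATH')
else:
    print('found git at', git)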
|
#vtb
def print(root):
def print_before(previous=0, defined=None, is_last=False):
defined = defined or {}
        ret = ''
        if previous != 0:
            for i in range(previous - 1):
                if i in defined:
                    ret += '|  '   # connector literals approximate; originals were stripped
                else:
                    ret += '   '
        # NOTE: the lines that follow reference `rule` and `callback`; in the original
        # module they belong to a separate traversal callback whose header was lost here.
defined = defined or set()
defined.add(previous)
for i in range(len(rule.to_symbols) - 1):
yield callback(rule.to_symbols[i], previous + 1, defined, False)
defined.remove(previous)
yield callback(rule.to_symbols[-1], previous + 1, defined, True)
res = Traversing.traverse_separated(root, rule_traverse, nonterminal_traverse, terminal_traverse)
return str.join("", res)
|
Transform the parsed tree to a string. Expects a tree-like structure.
You can see example output below.
(R)SplitRules26
|--(N)Iterate
| `--(R)SplitRules30
| `--(N)Symb
| `--(R)SplitRules4
| `--(T)e
`--(N)Concat
`--(R)SplitRules27
`--(N)Iterate
`--(R)SplitRules30
`--(N)Symb
`--(R)SplitRules5
`--(T)f
:param root: Root node of the parsed tree.
:return: String representing the parsed tree (ends with newline).
|
#vtb
def fullname(self):
prefix = ""
if self.parent:
if self.parent.fullname:
prefix = self.parent.fullname + ":"
else:
return ""
return prefix + self.name
|
includes the full path with parent names
|
#vtb
def to_bytes(s, encoding=None, errors=None):
if not isinstance(s, bytes):
        return ('%s' % s).encode(encoding or 'utf-8', errors or 'strict')
    elif not encoding or encoding == 'utf-8':
        return s
    else:
        d = s.decode('utf-8')
        return d.encode(encoding, errors or 'strict')
|
Convert *s* into bytes
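Examples, assuming the restored defaults of utf-8 encoding and strict error handling:
to_bytes('héllo')                      # b'h\xc3\xa9llo'
to_bytes('héllo', encoding='latin-1')  # b'h\xe9llo'
to_bytes(b'raw bytes')                 # returned unchanged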
|
#vtb
def igrf12syn(isv, date, itype, alt, lat, elong):
p, q, cl, sl = [0.] * 105, [0.] * 105, [0.] * 13, [0.] * 13
x, y, z = 0., 0., 0.
if date < 1900.0 or date > 2025.0:
f = 1.0
        # Warning text approximated; the original string literals were stripped.
        print('This subroutine will not work with a date of ' + str(date))
        print('Date must be in the range 1900.0 <= date <= 2025.0')
        print('On return x = y = z = 0 and f = 1.0')
return x, y, z, f
elif date >= 2015.0:
if date > 2020.0:
            # Warning text approximated; the original string literals were stripped.
            print('This version of the IGRF is intended for use up to 2020.0.')
            print('Values for ' + str(date) + ' will be computed but may be of reduced accuracy.')
t = date - 2015.0
tc = 1.0
if isv == 1:
t = 1.0
tc = 0.0
ll = 3060
nmx = 13
nc = nmx * (nmx + 2)
kmx = (nmx + 1) * (nmx + 2) / 2
else:
t = 0.2 * (date - 1900.0)
ll = int(t)
t = t - ll
if date < 1995.0:
nmx = 10
nc = nmx * (nmx + 2)
ll = nc * ll
kmx = (nmx + 1) * (nmx + 2) / 2
else:
nmx = 13
nc = nmx * (nmx + 2)
ll = round(0.2 * (date - 1995.0))
ll = 120 * 19 + nc * ll
kmx = (nmx + 1) * (nmx + 2) / 2
tc = 1.0 - t
if isv == 1:
tc = -0.2
t = 0.2
colat = 90-lat
r = alt
one = colat / FACT
ct = np.cos(one)
st = np.sin(one)
one = elong / FACT
cl[0] = np.cos(one)
sl[0] = np.sin(one)
cd = 1.0
sd = 0.0
l = 1
m = 1
n = 0
if itype != 2:
gclat, gclon, r = geodetic2geocentric(np.arctan2(st, ct), alt)
ct, st = np.cos(gclat), np.sin(gclat)
cd, sd = np.cos(gclon), np.sin(gclon)
ratio = 6371.2 / r
rr = ratio * ratio
p[0] = 1.0
p[2] = st
q[0] = 0.0
q[2] = ct
fn, gn = n, n-1
for k in range(2, int(kmx)+1):
if n < m:
m = 0
n = n + 1
rr = rr * ratio
fn = n
gn = n - 1
fm = m
if m != n:
gmm = m * m
one = np.sqrt(fn * fn - gmm)
two = np.sqrt(gn * gn - gmm) / one
three = (fn + gn) / one
i = k - n
j = i - n + 1
p[k - 1] = three * ct * p[i - 1] - two * p[j - 1]
q[k - 1] = three * (ct * q[i - 1] - st * p[i - 1]) - two * q[j - 1]
else:
if k != 3:
one = np.sqrt(1.0 - 0.5 / fm)
j = k - n - 1
p[k-1] = one * st * p[j-1]
q[k-1] = one * (st * q[j-1] + ct * p[j-1])
cl[m-1] = cl[m - 2] * cl[0] - sl[m - 2] * sl[0]
sl[m-1] = sl[m - 2] * cl[0] + cl[m - 2] * sl[0]
lm = ll + l
one = (tc * gh[int(lm-1)] + t * gh[int(lm + nc-1)]) * rr
if m == 0:
x = x + one * q[k - 1]
z = z - (fn + 1.0) * one * p[k - 1]
l = l + 1
else:
two = (tc * gh[int(lm)] + t * gh[int(lm + nc)]) * rr
three = one * cl[m-1] + two * sl[m-1]
x = x + three * q[k-1]
z = z - (fn + 1.0) * three * p[k-1]
if st == 0.0:
y = y + (one * sl[m - 1] - two * cl[m - 1]) * q[k - 1] * ct
else:
y = y + (one * sl[m-1] - two * cl[m-1]) * fm * p[k-1] / st
l = l + 2
m = m+1
one = x
x = x * cd + z * sd
z = z * cd - one * sd
f = np.sqrt(x * x + y * y + z * z)
return x, y, z, f
|
This is a synthesis routine for the 12th generation IGRF as agreed
in December 2014 by IAGA Working Group V-MOD. It is valid 1900.0 to
2020.0 inclusive. Values for dates from 1945.0 to 2010.0 inclusive are
definitive, otherwise they are non-definitive.
INPUT
isv = 0 if main-field values are required
isv = 1 if secular variation values are required
date = year A.D. Must be greater than or equal to 1900.0 and
less than or equal to 2025.0. Warning message is given
for dates greater than 2020.0. Must be double precision.
itype = 1 if geodetic (spheroid)
itype = 2 if geocentric (sphere)
alt = height in km above sea level if itype = 1
= distance from centre of Earth in km if itype = 2 (>3485 km)
lat = latitude (-90~90)
elong = east-longitude (0-360)
alt, colat and elong must be double precision.
OUTPUT
x = north component (nT) if isv = 0, nT/year if isv = 1
y = east component (nT) if isv = 0, nT/year if isv = 1
z = vertical component (nT) if isv = 0, nT/year if isv = 1
f = total intensity (nT) if isv = 0, rubbish if isv = 1
To get the other geomagnetic elements (D, I, H and secular
variations dD, dH, dI and dF) use routines ptoc and ptocsv.
Adapted from 8th generation version to include new maximum degree for
main-field models for 2000.0 and onwards and use WGS84 spheroid instead
of International Astronomical Union 1966 spheroid as recommended by IAGA
in July 2003. Reference radius remains as 6371.2 km - it is NOT the mean
radius (= 6371.0 km) but 6371.2 km is what is used in determining the
coefficients. Adaptation by Susan Macmillan, August 2003 (for
9th generation), December 2004, December 2009 \ December 2014.
Coefficients at 1995.0 incorrectly rounded (rounded up instead of
to even) included as these are the coefficients published in Excel
spreadsheet July 2005.
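A hedged call sketch; it assumes the module-level coefficient array `gh` and the degrees-to-radians constant `FACT` referenced by the function are already loaded:
# Main-field values (isv=0) at 300 km altitude above 40N, 105E for epoch 2015.5.
x, y, z, f = igrf12syn(0, 2015.5, 1, 300.0, 40.0, 105.0)
print(x, y, z, f)   # north, east, vertical components and total intensity in nT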
|
#vtb
def format(self):
    subtag = self.data['subtag']
    # RFC 5646 2.1.1: region subtags are upper-case, script subtags are title-case.
    if self.data['type'] == 'region':
        return subtag.upper()
    if self.data['type'] == 'script':
return subtag.capitalize()
return subtag
|
Get the subtag code conventional format according to RFC 5646 section 2.1.1.
:return: string -- subtag code conventional format.
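A sketch using a hypothetical stand-in class that exposes the same `self.data` mapping (the 'subtag'/'type' keys are the ones restored above):
class _Subtag:
    def __init__(self, subtag, type_):
        self.data = {'subtag': subtag, 'type': type_}

_Subtag.format = format                    # attach the function above as a method
print(_Subtag('us', 'region').format())    # US
print(_Subtag('latn', 'script').format())  # Latn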
|
#vtb
def pull_byte(self, stack_pointer):
addr = stack_pointer.value
byte = self.memory.read_byte(addr)
stack_pointer.increment(1)
return byte
|
Pull a byte from the stack.
|
#vtb
def wallet_frontiers(self, wallet):
    wallet = self._process_value(wallet, 'wallet')
    payload = {"wallet": wallet}
    resp = self.call('wallet_frontiers', payload)
    return resp.get('frontiers') or {}
|
Returns a list of pairs of account and block hash representing the
head block starting for accounts from **wallet**
:param wallet: Wallet to return frontiers for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_frontiers(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000": "000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
}
|
#vtb
def places_within_radius(
self, place=None, latitude=None, longitude=None, radius=0, **kwargs
):
    kwargs['withdist'] = True
    kwargs['withcoord'] = True
    kwargs['withhash'] = False
    kwargs.setdefault('sort', 'ASC')
    unit = kwargs.setdefault('unit', 'km')
if place is not None:
response = self.redis.georadiusbymember(
self.key, self._pickle(place), radius, **kwargs
)
elif (latitude is not None) and (longitude is not None):
response = self.redis.georadius(
self.key, longitude, latitude, radius, **kwargs
)
else:
        raise ValueError(
            'Must specify either place, or both latitude and longitude'  # wording approximate
        )
ret = []
for item in response:
ret.append(
{
                'place': self._unpickle(item[0]),   # key names restored to match the docstring
                'distance': item[1],
                'unit': unit,
                'latitude': item[2][1],
                'longitude': item[2][0],
}
)
return ret
|
Return descriptions of the places stored in the collection that are
within the circle specified by the given location and radius.
A list of dicts will be returned.
The center of the circle can be specified by the identifier of another
place in the collection with the *place* keyword argument.
Or, it can be specified by using both the *latitude* and *longitude*
keyword arguments.
By default the *radius* is given in kilometers, but you may also set
the *unit* keyword argument to ``'m'``, ``'mi'``, or ``'ft'``.
Limit the number of results returned with the *count* keyword argument.
Change the sorted order by setting the *sort* keyword argument to
``b'DESC'``.
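A hedged sketch; `places` stands for an already-populated instance of this Redis-backed collection, and the result keys are the ones restored above:
nearby = places.places_within_radius(
    latitude=37.7749, longitude=-122.4194, radius=5, unit='km', count=10)
for hit in nearby:
    print(hit['place'], hit['distance'], hit['unit'])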
|
#vtb
def make_content_range(self, length):
rng = self.range_for_length(length)
if rng is not None:
return ContentRange(self.units, rng[0], rng[1], length)
|
Creates a :class:`~werkzeug.datastructures.ContentRange` object
from the current range and given content length.
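For example, with werkzeug's Range datastructure (which this method belongs to):
from werkzeug.datastructures import Range

rng = Range('bytes', [(0, 500)])
content_range = rng.make_content_range(1234)
# ContentRange('bytes', 0, 500, 1234), i.e. the header value 'bytes 0-499/1234'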
|
#vtb
def _increment_stage(self):
try:
if self._cur_stage < self._stage_count:
self._cur_stage += 1
else:
self._completed_flag.set()
    except Exception as ex:
raise EnTKError(text=ex)
|
Purpose: Increment stage pointer. Also check if Pipeline has completed.
|
#vtb
def lock_pidfile_or_die(pidfile):
pid = os.getpid()
try:
remove_if_stale_pidfile(pidfile)
        pid_write_file = pidfile + '.' + str(pid)
        fpid = open(pid_write_file, 'w')
try:
fpid.write("%s\n" % pid)
finally:
fpid.close()
if not take_file_lock(pid_write_file, pidfile, "%s\n" % pid):
sys.exit(1)
except SystemExit:
raise
except Exception:
log.exception("unable to take pidfile")
sys.exit(1)
return pid
|
@pidfile:
must be a writable path
Exceptions are logged.
Returns the PID.
|
#vtb
def debug_print_strip_msg(self, i, line):
if self.debug_level == 2:
print(" Stripping Line %d: " % (i + 1, line.rstrip()))
elif self.debug_level > 2:
print(" Stripping Line %d:" % (i + 1))
hexdump(line)
|
Debug print indicating that an empty line is being skipped
:param i: The line number of the line that is being currently parsed
:param line: the parsed line
:return: None
|
#vtb
def get_previous_price_list(self, currency, start_date, end_date):
    start = start_date.strftime('%Y-%m-%d')
    end = end_date.strftime('%Y-%m-%d')
    # Coindesk BPI historical endpoint assumed; the original URL literal was stripped.
    url = (
        'https://api.coindesk.com/v1/bpi/historical/close.json'
        '?start={}&end={}&currency={}'.format(start, end, currency)
    )
response = requests.get(url)
if response.status_code == 200:
data = self._decode_rates(response)
        price_dict = data.get('bpi', {})  # 'bpi' key assumed from the Coindesk response format
return price_dict
return {}
|
Get List of prices between two dates
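A usage sketch; `btc` stands for an instance of the converter class this method belongs to:
from datetime import datetime

prices = btc.get_previous_price_list('USD',
                                     datetime(2021, 1, 1),
                                     datetime(2021, 1, 7))
# -> dict mapping 'YYYY-MM-DD' strings to closing prices, or {} on error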
|
#vtb
def get_devices(self, condition=None, page_size=1000):
condition = validate_type(condition, type(None), Expression, *six.string_types)
page_size = validate_type(page_size, *six.integer_types)
params = {"embed": "true"}
if condition is not None:
params["condition"] = condition.compile()
for device_json in self._conn.iter_json_pages("/ws/DeviceCore", page_size=page_size, **params):
yield Device(self._conn, self._sci, device_json)
|
Iterates over each :class:`Device` for this device cloud account
Examples::
# get a list of all devices
all_devices = list(dc.devicecore.get_devices())
# build a mapping of devices by their vendor id using a
# dict comprehension
devices = dc.devicecore.get_devices() # generator object
devs_by_vendor_id = {d.get_vendor_id(): d for d in devices}
# iterate over all devices in 'minnesota' group and
# print the device mac and location
for device in dc.get_devices(group_path == 'minnesota'):
print "%s at %s" % (device.get_mac(), device.get_location())
:param condition: An :class:`.Expression` which defines the condition
which must be matched on the devicecore. If unspecified,
an iterator over all devices will be returned.
:param int page_size: The number of results to fetch in a
single page. In general, the default will suffice.
:returns: Iterator over each :class:`~Device` in this device cloud
account in the form of a generator object.
|
#vtb
def download_sample_and_align(job, sample, inputs, ids):
uuid, urls = sample
r1_url, r2_url = urls if len(urls) == 2 else (urls[0], None)
job.fileStore.logToMaster(.format(uuid, r1_url, r2_url))
ids[] = job.addChildJobFn(download_url_job, r1_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
if r2_url:
ids[] = job.addChildJobFn(download_url_job, r2_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
else:
ids[] = None
inputs.cores = min(inputs.maxCores, multiprocessing.cpu_count())
inputs.uuid = uuid
config = dict(**vars(inputs))
config.update(ids)
config = argparse.Namespace(**config)
bam_id = job.wrapJobFn(run_bwakit, config, sort=inputs.sort, trim=inputs.trim,
disk=inputs.file_size, cores=inputs.cores)
job.addFollowOn(bam_id)
output_name = uuid + + str(inputs.suffix) if inputs.suffix else uuid +
if urlparse(inputs.output_dir).scheme == :
bam_id.addChildJobFn(s3am_upload_job, file_id=bam_id.rv(), file_name=output_name, s3_dir=inputs.output_dir,
s3_key_path=inputs.ssec, cores=inputs.cores, disk=inputs.file_size)
else:
mkdir_p(inputs.output_dir)
bam_id.addChildJobFn(copy_file_job, name=output_name, file_id=bam_id.rv(), output_dir=inputs.output_dir,
disk=inputs.file_size)
|
Downloads the sample and runs BWA-kit
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param tuple(str, list) sample: UUID and URLS for sample
:param Namespace inputs: Contains input arguments
:param dict ids: FileStore IDs for shared inputs
|
### Input:
Downloads the sample and runs BWA-kit
:param JobFunctionWrappingJob job: Passed by Toil automatically
:param tuple(str, list) sample: UUID and URLS for sample
:param Namespace inputs: Contains input arguments
:param dict ids: FileStore IDs for shared inputs
### Response:
#vtb
def download_sample_and_align(job, sample, inputs, ids):
uuid, urls = sample
r1_url, r2_url = urls if len(urls) == 2 else (urls[0], None)
job.fileStore.logToMaster(.format(uuid, r1_url, r2_url))
ids[] = job.addChildJobFn(download_url_job, r1_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
if r2_url:
ids[] = job.addChildJobFn(download_url_job, r2_url, s3_key_path=inputs.ssec, disk=inputs.file_size).rv()
else:
ids[] = None
inputs.cores = min(inputs.maxCores, multiprocessing.cpu_count())
inputs.uuid = uuid
config = dict(**vars(inputs))
config.update(ids)
config = argparse.Namespace(**config)
bam_id = job.wrapJobFn(run_bwakit, config, sort=inputs.sort, trim=inputs.trim,
disk=inputs.file_size, cores=inputs.cores)
job.addFollowOn(bam_id)
output_name = uuid + + str(inputs.suffix) if inputs.suffix else uuid +
if urlparse(inputs.output_dir).scheme == :
bam_id.addChildJobFn(s3am_upload_job, file_id=bam_id.rv(), file_name=output_name, s3_dir=inputs.output_dir,
s3_key_path=inputs.ssec, cores=inputs.cores, disk=inputs.file_size)
else:
mkdir_p(inputs.output_dir)
bam_id.addChildJobFn(copy_file_job, name=output_name, file_id=bam_id.rv(), output_dir=inputs.output_dir,
disk=inputs.file_size)
|
#vtb
def features_properties_null_remove(obj):
features = obj[]
for i in tqdm(range(len(features))):
if in features[i]:
properties = features[i][]
features[i][] = {p:properties[p] for p in properties if properties[p] is not None}
return obj
|
Remove any properties of features in the collection that have
entries mapping to a null (i.e., None) value
|
### Input:
Remove any properties of features in the collection that have
entries mapping to a null (i.e., None) value
### Response:
#vtb
def features_properties_null_remove(obj):
features = obj[]
for i in tqdm(range(len(features))):
if in features[i]:
properties = features[i][]
features[i][] = {p:properties[p] for p in properties if properties[p] is not None}
return obj
|
#vtb
def merge(self, keypath, value, op=):
negated = False
keypath = keypath[:]
if keypath[0] == :
negated = self.get_environment_variable(, pop=False, default=False)
if negated:
keypath[0] = "distractor"
if keypath not in self:
first_referent = None
if keypath[0] in [, ]:
has_targets = False
for _, referent in self.iter_singleton_referents():
has_targets = True
if keypath[1:] in referent:
first_referent = referent
break
if first_referent is None:
if has_targets:
raise CellConstructionFailure("Cannot merge; no target: %s" \
% (str(keypath)))
else:
raise CellConstructionFailure("Empty belief state")
cell = first_referent.get_value_from_path(keypath[1:]).stem()
self.add_cell(keypath, cell)
else:
raise Exception("Could not find Keypath %s" % (str(keypath)))
cell = self
if not isinstance(keypath, list):
keypath = [keypath]
for key in keypath:
cell = cell[key]
try:
return getattr(cell, op)(value)
except Contradiction as ctrd:
raise Contradiction("Could not merge %s with %s: %s " % (str(keypath), str(value), ctrd))
|
First gets the cell at BeliefState's keypath, or creates a new cell
from the first target that has that keypath (This could mess up if the
member it's copying from has a different Cell or domain for that keypath.)
Second, this merges that cell with the value
|
### Input:
First gets the cell at BeliefState's keypath, or creates a new cell
from the first target that has that keypath (This could mess up if the
member it's copying from has a different Cell or domain for that keypath.)
Second, this merges that cell with the value
### Response:
#vtb
def merge(self, keypath, value, op=):
negated = False
keypath = keypath[:]
if keypath[0] == :
negated = self.get_environment_variable(, pop=False, default=False)
if negated:
keypath[0] = "distractor"
if keypath not in self:
first_referent = None
if keypath[0] in [, ]:
has_targets = False
for _, referent in self.iter_singleton_referents():
has_targets = True
if keypath[1:] in referent:
first_referent = referent
break
if first_referent is None:
if has_targets:
raise CellConstructionFailure("Cannot merge; no target: %s" \
% (str(keypath)))
else:
raise CellConstructionFailure("Empty belief state")
cell = first_referent.get_value_from_path(keypath[1:]).stem()
self.add_cell(keypath, cell)
else:
raise Exception("Could not find Keypath %s" % (str(keypath)))
cell = self
if not isinstance(keypath, list):
keypath = [keypath]
for key in keypath:
cell = cell[key]
try:
return getattr(cell, op)(value)
except Contradiction as ctrd:
raise Contradiction("Could not merge %s with %s: %s " % (str(keypath), str(value), ctrd))
|
#vtb
def excel_to_sql(excel_file_path, engine,
read_excel_kwargs=None,
to_generic_type_kwargs=None,
to_sql_kwargs=None):
if read_excel_kwargs is None:
read_excel_kwargs = dict()
if to_sql_kwargs is None:
to_sql_kwargs = dict()
if to_generic_type_kwargs is None:
to_generic_type_kwargs = dict()
xl = pd.ExcelFile(excel_file_path)
for sheet_name in xl.sheet_names:
df = pd.read_excel(
excel_file_path, sheet_name,
**read_excel_kwargs.get(sheet_name, dict())
)
kwargs = to_generic_type_kwargs.get(sheet_name)
if kwargs:
data = to_dict_list_generic_type(df, **kwargs)
smart_insert(data, sheet_name, engine)
else:
df.to_sql(
sheet_name, engine, index=False,
**to_sql_kwargs.get(sheet_name, dict(if_exists="replace"))
)
|
Create a database from excel.
:param read_excel_kwargs: dict, arguments for ``pandas.read_excel`` method.
example: ``{"employee": {"skiprows": 10}, "department": {}}``
:param to_sql_kwargs: dict, arguments for ``pandas.DataFrame.to_sql``
method.
limitation:
1. If an integer column has a None value, its data type in the database will be float,
because pandas represents the missing value as ``np.nan``.
2. If a string column looks like an integer, the ``pandas.read_excel()`` method
has no option to convert it to a string.
|
### Input:
Create a database from excel.
:param read_excel_kwargs: dict, arguments for ``pandas.read_excel`` method.
example: ``{"employee": {"skiprows": 10}, "department": {}}``
:param to_sql_kwargs: dict, arguments for ``pandas.DataFrame.to_sql``
method.
limitation:
1. If an integer column has a None value, its data type in the database will be float,
because pandas represents the missing value as ``np.nan``.
2. If a string column looks like an integer, the ``pandas.read_excel()`` method
has no option to convert it to a string.
### Response:
#vtb
def excel_to_sql(excel_file_path, engine,
read_excel_kwargs=None,
to_generic_type_kwargs=None,
to_sql_kwargs=None):
if read_excel_kwargs is None:
read_excel_kwargs = dict()
if to_sql_kwargs is None:
to_sql_kwargs = dict()
if to_generic_type_kwargs is None:
to_generic_type_kwargs = dict()
xl = pd.ExcelFile(excel_file_path)
for sheet_name in xl.sheet_names:
df = pd.read_excel(
excel_file_path, sheet_name,
**read_excel_kwargs.get(sheet_name, dict())
)
kwargs = to_generic_type_kwargs.get(sheet_name)
if kwargs:
data = to_dict_list_generic_type(df, **kwargs)
smart_insert(data, sheet_name, engine)
else:
df.to_sql(
sheet_name, engine, index=False,
**to_sql_kwargs.get(sheet_name, dict(if_exists="replace"))
)
|
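A minimal sketch of the plain branch of the loader above, one SQL table per Excel sheet via pandas, assuming a SQLite engine; the smart_insert / to_dict_list_generic_type path is project-specific and omitted.

import pandas as pd
import sqlalchemy as sa


def simple_excel_to_sql(excel_file_path, engine):
    # One table per sheet, replacing any existing table of the same name.
    xl = pd.ExcelFile(excel_file_path)
    for sheet_name in xl.sheet_names:
        df = pd.read_excel(excel_file_path, sheet_name)
        df.to_sql(sheet_name, engine, index=False, if_exists="replace")


# engine = sa.create_engine("sqlite:///example.db")
# simple_excel_to_sql("workbook.xlsx", engine)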
#vtb
def compute_alignments(self, prev_state, precomputed_values, mask=None):
WaSp = T.dot(prev_state, self.Wa)
UaH = precomputed_values
if UaH.ndim == 2:
preact = WaSp[:, None, :] + UaH[None, :, :]
else:
preact = WaSp[:, None, :] + UaH
act = T.activate(preact, )
align_scores = T.dot(act, self.Va)
if mask:
mask = (1 - mask) * -99.00
if align_scores.ndim == 3:
align_scores += mask[None, :]
else:
align_scores += mask
align_weights = T.nnet.softmax(align_scores)
return align_weights
|
Compute the alignment weights based on the previous state.
|
### Input:
Compute the alignment weights based on the previous state.
### Response:
#vtb
def compute_alignments(self, prev_state, precomputed_values, mask=None):
WaSp = T.dot(prev_state, self.Wa)
UaH = precomputed_values
if UaH.ndim == 2:
preact = WaSp[:, None, :] + UaH[None, :, :]
else:
preact = WaSp[:, None, :] + UaH
act = T.activate(preact, )
align_scores = T.dot(act, self.Va)
if mask:
mask = (1 - mask) * -99.00
if align_scores.ndim == 3:
align_scores += mask[None, :]
else:
align_scores += mask
align_weights = T.nnet.softmax(align_scores)
return align_weights
|
#vtb
def solve(self, lam):
s = weighted_graphtf(self.nnodes, self.y, self.weights, lam,
self.Dk.shape[0], self.Dk.shape[1], self.Dk.nnz,
self.Dk.row.astype(), self.Dk.col.astype(), self.Dk.data.astype(),
self.maxsteps, self.converge,
self.beta, self.u)
self.steps.append(s)
return self.beta
|
Solves the GFL for a fixed value of lambda.
|
### Input:
Solves the GFL for a fixed value of lambda.
### Response:
#vtb
def solve(self, lam):
s = weighted_graphtf(self.nnodes, self.y, self.weights, lam,
self.Dk.shape[0], self.Dk.shape[1], self.Dk.nnz,
self.Dk.row.astype(), self.Dk.col.astype(), self.Dk.data.astype(),
self.maxsteps, self.converge,
self.beta, self.u)
self.steps.append(s)
return self.beta
|
#vtb
def period(self):
return timedelta(seconds=2 * np.pi * np.sqrt(self.kep.a ** 3 / self.mu))
|
Period of the orbit as a timedelta
|
### Input:
Period of the orbit as a timedelta
### Response:
#vtb
def period(self):
return timedelta(seconds=2 * np.pi * np.sqrt(self.kep.a ** 3 / self.mu))
|
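A worked instance of the formula above, T = 2*pi*sqrt(a^3 / mu), using Earth's gravitational parameter and a semi-major axis typical of a ~400 km circular orbit; the numbers are illustrative, not taken from the source.

import math
from datetime import timedelta

mu = 3.986004418e14   # Earth's gravitational parameter, m^3 / s^2
a = 6_778_000.0       # semi-major axis, m (roughly a 400 km altitude orbit)

period = timedelta(seconds=2 * math.pi * math.sqrt(a ** 3 / mu))
print(period)         # about 1:32:30, i.e. a ~92.6 minute orbit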
#vtb
def quandl_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
api_key = environ.get()
if api_key is None:
raise ValueError(
"Please set your QUANDL_API_KEY environment variable and retry."
)
raw_data = fetch_data_table(
api_key,
show_progress,
environ.get(, 5)
)
asset_metadata = gen_asset_metadata(
raw_data[[, ]],
show_progress
)
asset_db_writer.write(asset_metadata)
symbol_map = asset_metadata.symbol
sessions = calendar.sessions_in_range(start_session, end_session)
raw_data.set_index([, ], inplace=True)
daily_bar_writer.write(
parse_pricing_and_vol(
raw_data,
sessions,
symbol_map
),
show_progress=show_progress
)
raw_data.reset_index(inplace=True)
raw_data[] = raw_data[].astype()
raw_data[] = raw_data.symbol.cat.codes
adjustment_writer.write(
splits=parse_splits(
raw_data[[
,
,
,
]].loc[raw_data.split_ratio != 1],
show_progress=show_progress
),
dividends=parse_dividends(
raw_data[[
,
,
,
]].loc[raw_data.ex_dividend != 0],
show_progress=show_progress
)
)
|
quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset.
For more information on Quandl's API and how to obtain an API key,
please visit https://docs.quandl.com/docs#section-authentication
|
### Input:
quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset.
For more information on Quandl's API and how to obtain an API key,
please visit https://docs.quandl.com/docs#section-authentication
### Response:
#vtb
def quandl_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
api_key = environ.get()
if api_key is None:
raise ValueError(
"Please set your QUANDL_API_KEY environment variable and retry."
)
raw_data = fetch_data_table(
api_key,
show_progress,
environ.get(, 5)
)
asset_metadata = gen_asset_metadata(
raw_data[[, ]],
show_progress
)
asset_db_writer.write(asset_metadata)
symbol_map = asset_metadata.symbol
sessions = calendar.sessions_in_range(start_session, end_session)
raw_data.set_index([, ], inplace=True)
daily_bar_writer.write(
parse_pricing_and_vol(
raw_data,
sessions,
symbol_map
),
show_progress=show_progress
)
raw_data.reset_index(inplace=True)
raw_data[] = raw_data[].astype()
raw_data[] = raw_data.symbol.cat.codes
adjustment_writer.write(
splits=parse_splits(
raw_data[[
,
,
,
]].loc[raw_data.split_ratio != 1],
show_progress=show_progress
),
dividends=parse_dividends(
raw_data[[
,
,
,
]].loc[raw_data.ex_dividend != 0],
show_progress=show_progress
)
)
|
#vtb
def state(self):
return Emitter(weakref.proxy(self.lib), self.lib.jit_new_state())
|
Returns a new JIT state. You have to clean up by calling .destroy()
afterwards.
|
### Input:
Returns a new JIT state. You have to clean up by calling .destroy()
afterwards.
### Response:
#vtb
def state(self):
return Emitter(weakref.proxy(self.lib), self.lib.jit_new_state())
|
#vtb
def get_share_url_with_dirname(uk, shareid, dirname):
return .join([
const.PAN_URL, ,
, shareid,
, uk,
, encoder.encode_uri_component(dirname),
,
])
|
Get the share link for a shared directory
|
### Input:
Get the share link for a shared directory
### Response:
#vtb
def get_share_url_with_dirname(uk, shareid, dirname):
return .join([
const.PAN_URL, ,
, shareid,
, uk,
, encoder.encode_uri_component(dirname),
,
])
|
#vtb
def getEAnnotation(self, source):
for annotation in self.eAnnotations:
if annotation.source == source:
return annotation
return None
|
Return the annotation with a matching source attribute.
|
### Input:
Return the annotation with a matching source attribute.
### Response:
#vtb
def getEAnnotation(self, source):
for annotation in self.eAnnotations:
if annotation.source == source:
return annotation
return None
|
#vtb
def _read_response(self, response):
self.name = response[]
self.description = response[]
self.layoutName = response[]
self.archiveBrowsingEnabled = response[]
|
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Repository+Configuration+JSON
|
### Input:
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Repository+Configuration+JSON
### Response:
#vtb
def _read_response(self, response):
self.name = response[]
self.description = response[]
self.layoutName = response[]
self.archiveBrowsingEnabled = response[]
|
#vtb
def _analyze_file(self, f):
f.seek(0)
if self.CHECK_BOM:
encoding = self.has_bom(f)
f.seek(0)
else:
util.warn_deprecated(
" attribute is deprecated. "
"Please override 'has_bom` function to control or avoid BOM detection."
)
if encoding is None:
encoding = self._utf_strip_bom(self.header_check(f.read(1024)))
f.seek(0)
if encoding is None:
encoding = self._utf_strip_bom(self.content_check(f))
f.seek(0)
return encoding
|
Analyze the file.
|
### Input:
Analyze the file.
### Response:
#vtb
def _analyze_file(self, f):
f.seek(0)
if self.CHECK_BOM:
encoding = self.has_bom(f)
f.seek(0)
else:
util.warn_deprecated(
" attribute is deprecated. "
"Please override 'has_bom` function to control or avoid BOM detection."
)
if encoding is None:
encoding = self._utf_strip_bom(self.header_check(f.read(1024)))
f.seek(0)
if encoding is None:
encoding = self._utf_strip_bom(self.content_check(f))
f.seek(0)
return encoding
|
#vtb
def origin_east_asia(origin):
return origin_china(origin) or origin_japan(origin) \
or origin_mongolia(origin) or origin_south_korea(origin) \
or origin_taiwan(origin)
|
\
Returns whether the origin is located in East Asia
Holds true for the following countries:
* China
* Japan
* Mongolia
* South Korea
* Taiwan
`origin`
The origin to check.
|
### Input:
\
Returns whether the origin is located in East Asia
Holds true for the following countries:
* China
* Japan
* Mongolia
* South Korea
* Taiwan
`origin`
The origin to check.
### Response:
#vtb
def origin_east_asia(origin):
return origin_china(origin) or origin_japan(origin) \
or origin_mongolia(origin) or origin_south_korea(origin) \
or origin_taiwan(origin)
|
#vtb
def normalize(self) -> :
tensor = self.tensor / bk.ccast(bk.sqrt(self.norm()))
return State(tensor, self.qubits, self._memory)
|
Normalize the state
|
### Input:
Normalize the state
### Response:
#vtb
def normalize(self) -> :
tensor = self.tensor / bk.ccast(bk.sqrt(self.norm()))
return State(tensor, self.qubits, self._memory)
|
#vtb
def _load_poses(self):
pose_file = os.path.join(self.pose_path, self.sequence + )
poses = []
try:
with open(pose_file, ) as f:
lines = f.readlines()
if self.frames is not None:
lines = [lines[i] for i in self.frames]
for line in lines:
T_w_cam0 = np.fromstring(line, dtype=float, sep=)
T_w_cam0 = T_w_cam0.reshape(3, 4)
T_w_cam0 = np.vstack((T_w_cam0, [0, 0, 0, 1]))
poses.append(T_w_cam0)
except FileNotFoundError:
print( +
self.sequence + )
self.poses = poses
|
Load ground truth poses (T_w_cam0) from file.
|
### Input:
Load ground truth poses (T_w_cam0) from file.
### Response:
#vtb
def _load_poses(self):
pose_file = os.path.join(self.pose_path, self.sequence + )
poses = []
try:
with open(pose_file, ) as f:
lines = f.readlines()
if self.frames is not None:
lines = [lines[i] for i in self.frames]
for line in lines:
T_w_cam0 = np.fromstring(line, dtype=float, sep=)
T_w_cam0 = T_w_cam0.reshape(3, 4)
T_w_cam0 = np.vstack((T_w_cam0, [0, 0, 0, 1]))
poses.append(T_w_cam0)
except FileNotFoundError:
print( +
self.sequence + )
self.poses = poses
|
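A sketch of what the loop above does for a single KITTI-style pose line: 12 whitespace-separated floats form a row-major 3x4 matrix, which is then made homogeneous. The separator argument is stripped in the source, so whitespace is an assumption here.

import numpy as np

line = "1 0 0 0.5   0 1 0 0.0   0 0 1 2.0"      # example 3x4 pose, row-major
T_w_cam0 = np.fromstring(line, dtype=float, sep=" ").reshape(3, 4)
T_w_cam0 = np.vstack((T_w_cam0, [0, 0, 0, 1]))  # homogeneous 4x4 transform
print(T_w_cam0.shape)                            # (4, 4)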
#vtb
def get_creation_date(
self,
bucket: str,
key: str,
) -> datetime:
return self.get_last_modified_date(bucket, key)
|
Retrieves the creation date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the creation date is being retrieved.
:return: the creation date
|
### Input:
Retrieves the creation date for a given key in a given bucket.
:param bucket: the bucket the object resides in.
:param key: the key of the object for which the creation date is being retrieved.
:return: the creation date
### Response:
#vtb
def get_creation_date(
self,
bucket: str,
key: str,
) -> datetime:
return self.get_last_modified_date(bucket, key)
|
#vtb
def _pop_comment_block(self, statements, header_re):
res = []
comments = []
match = None
st_iter = iter(statements)
for st in st_iter:
if isinstance(st, ast.Comment):
match = header_re.match(st.text)
if match:
break
else:
res.append(st)
else:
res.append(st)
for st in st_iter:
if isinstance(st, ast.Comment):
comments.append(st)
else:
res.append(st)
break
res.extend(list(st_iter))
return match, dedent("".join(c.text[1:] + "\n" for c in comments)), res
|
Look for a series of comments that start with one that matches the
regex. If the first comment is found, all subsequent comments are
popped from statements, then concatenated, dedented, and returned.
|
### Input:
Look for a series of comments that start with one that matches the
regex. If the first comment is found, all subsequent comments are
popped from statements, concatenated and dedented and returned.
### Response:
#vtb
def _pop_comment_block(self, statements, header_re):
res = []
comments = []
match = None
st_iter = iter(statements)
for st in st_iter:
if isinstance(st, ast.Comment):
match = header_re.match(st.text)
if match:
break
else:
res.append(st)
else:
res.append(st)
for st in st_iter:
if isinstance(st, ast.Comment):
comments.append(st)
else:
res.append(st)
break
res.extend(list(st_iter))
return match, dedent("".join(c.text[1:] + "\n" for c in comments)), res
|
#vtb
def covlen(args):
import numpy as np
import pandas as pd
import seaborn as sns
from jcvi.formats.base import DictFile
p = OptionParser(covlen.__doc__)
p.add_option("--maxsize", default=1000000, type="int", help="Max contig size")
p.add_option("--maxcov", default=100, type="int", help="Max contig size")
p.add_option("--color", default=, help="Color of the data points")
p.add_option("--kind", default="scatter",
choices=("scatter", "reg", "resid", "kde", "hex"),
help="Kind of plot to draw")
opts, args, iopts = p.set_image_options(args, figsize="8x8")
if len(args) != 2:
sys.exit(not p.print_help())
covfile, fastafile = args
cov = DictFile(covfile, cast=float)
s = Sizes(fastafile)
data = []
maxsize, maxcov = opts.maxsize, opts.maxcov
for ctg, size in s.iter_sizes():
c = cov.get(ctg, 0)
if size > maxsize:
continue
if c > maxcov:
continue
data.append((size, c))
x, y = zip(*data)
x = np.array(x)
y = np.array(y)
logging.debug("X size {0}, Y size {1}".format(x.size, y.size))
df = pd.DataFrame()
xlab, ylab = "Length", "Coverage of depth (X)"
df[xlab] = x
df[ylab] = y
sns.jointplot(xlab, ylab, kind=opts.kind, data=df,
xlim=(0, maxsize), ylim=(0, maxcov),
stat_func=None, edgecolor="w", color=opts.color)
figname = covfile + ".pdf"
savefig(figname, dpi=iopts.dpi, iopts=iopts)
|
%prog covlen covfile fastafile
Plot coverage vs length. `covfile` is a two-column file listing contig id and
depth of coverage.
|
### Input:
%prog covlen covfile fastafile
Plot coverage vs length. `covfile` is a two-column file listing contig id and
depth of coverage.
### Response:
#vtb
def covlen(args):
import numpy as np
import pandas as pd
import seaborn as sns
from jcvi.formats.base import DictFile
p = OptionParser(covlen.__doc__)
p.add_option("--maxsize", default=1000000, type="int", help="Max contig size")
p.add_option("--maxcov", default=100, type="int", help="Max contig size")
p.add_option("--color", default=, help="Color of the data points")
p.add_option("--kind", default="scatter",
choices=("scatter", "reg", "resid", "kde", "hex"),
help="Kind of plot to draw")
opts, args, iopts = p.set_image_options(args, figsize="8x8")
if len(args) != 2:
sys.exit(not p.print_help())
covfile, fastafile = args
cov = DictFile(covfile, cast=float)
s = Sizes(fastafile)
data = []
maxsize, maxcov = opts.maxsize, opts.maxcov
for ctg, size in s.iter_sizes():
c = cov.get(ctg, 0)
if size > maxsize:
continue
if c > maxcov:
continue
data.append((size, c))
x, y = zip(*data)
x = np.array(x)
y = np.array(y)
logging.debug("X size {0}, Y size {1}".format(x.size, y.size))
df = pd.DataFrame()
xlab, ylab = "Length", "Coverage of depth (X)"
df[xlab] = x
df[ylab] = y
sns.jointplot(xlab, ylab, kind=opts.kind, data=df,
xlim=(0, maxsize), ylim=(0, maxcov),
stat_func=None, edgecolor="w", color=opts.color)
figname = covfile + ".pdf"
savefig(figname, dpi=iopts.dpi, iopts=iopts)
|
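The plotting core of the entry above, reduced to synthetic data and the keyword-style seaborn API (recent seaborn versions dropped the positional x/y and stat_func arguments used in the source):

import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(0)
df = pd.DataFrame({
    "Length": rng.integers(1_000, 1_000_000, size=500),
    "Coverage of depth (X)": rng.uniform(0, 100, size=500),
})
sns.jointplot(data=df, x="Length", y="Coverage of depth (X)",
              kind="scatter", xlim=(0, 1_000_000), ylim=(0, 100))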
#vtb
def get_contacts(self):
all_contacts = self.wapi_functions.getAllContacts()
return [Contact(contact, self) for contact in all_contacts]
|
Fetches list of all contacts
This will return chats with people from the address book only
Use get_all_chats for all chats
:return: List of contacts
:rtype: list[Contact]
|
### Input:
Fetches list of all contacts
This will return chats with people from the address book only
Use get_all_chats for all chats
:return: List of contacts
:rtype: list[Contact]
### Response:
#vtb
def get_contacts(self):
all_contacts = self.wapi_functions.getAllContacts()
return [Contact(contact, self) for contact in all_contacts]
|
#vtb
def interp(self, new_timestamps, interpolation_mode=0):
if not len(self.samples) or not len(new_timestamps):
return Signal(
self.samples.copy(),
self.timestamps.copy(),
self.unit,
self.name,
comment=self.comment,
conversion=self.conversion,
raw=self.raw,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=self.invalidation_bits.copy()
if self.invalidation_bits is not None
else None,
encoding=self.encoding,
)
else:
if len(self.samples.shape) > 1:
idx = np.searchsorted(self.timestamps, new_timestamps, side="right")
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
kind = self.samples.dtype.kind
if kind == "f":
s = np.interp(new_timestamps, self.timestamps, self.samples)
if self.invalidation_bits is not None:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
elif kind in "ui":
if interpolation_mode == 1:
s = np.interp(
new_timestamps, self.timestamps, self.samples
).astype(self.samples.dtype)
if self.invalidation_bits is not None:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
idx = np.searchsorted(self.timestamps, new_timestamps, side="right")
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
return Signal(
s,
new_timestamps,
self.unit,
self.name,
comment=self.comment,
conversion=self.conversion,
source=self.source,
raw=self.raw,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=invalidation_bits,
encoding=self.encoding,
)
|
returns a new *Signal* interpolated using the *new_timestamps*
Parameters
----------
new_timestamps : np.array
timestamps used for interpolation
interpolation_mode : int
interpolation mode for integer signals; default 0
* 0 - repeat previous samples
* 1 - linear interpolation
Returns
-------
signal : Signal
new interpolated *Signal*
|
### Input:
returns a new *Signal* interpolated using the *new_timestamps*
Parameters
----------
new_timestamps : np.array
timestamps used for interpolation
interpolation_mode : int
interpolation mode for integer signals; default 0
* 0 - repeat previous samples
* 1 - linear interpolation
Returns
-------
signal : Signal
new interpolated *Signal*
### Response:
#vtb
def interp(self, new_timestamps, interpolation_mode=0):
if not len(self.samples) or not len(new_timestamps):
return Signal(
self.samples.copy(),
self.timestamps.copy(),
self.unit,
self.name,
comment=self.comment,
conversion=self.conversion,
raw=self.raw,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=self.invalidation_bits.copy()
if self.invalidation_bits is not None
else None,
encoding=self.encoding,
)
else:
if len(self.samples.shape) > 1:
idx = np.searchsorted(self.timestamps, new_timestamps, side="right")
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
kind = self.samples.dtype.kind
if kind == "f":
s = np.interp(new_timestamps, self.timestamps, self.samples)
if self.invalidation_bits is not None:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
elif kind in "ui":
if interpolation_mode == 1:
s = np.interp(
new_timestamps, self.timestamps, self.samples
).astype(self.samples.dtype)
if self.invalidation_bits is not None:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
idx = np.searchsorted(
self.timestamps, new_timestamps, side="right"
)
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
else:
idx = np.searchsorted(self.timestamps, new_timestamps, side="right")
idx -= 1
idx = np.clip(idx, 0, idx[-1])
s = self.samples[idx]
if self.invalidation_bits is not None:
invalidation_bits = self.invalidation_bits[idx]
else:
invalidation_bits = None
return Signal(
s,
new_timestamps,
self.unit,
self.name,
comment=self.comment,
conversion=self.conversion,
source=self.source,
raw=self.raw,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=invalidation_bits,
encoding=self.encoding,
)
|
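The two interpolation modes handled above, reduced to plain NumPy on a toy integer channel: mode 0 repeats the previous sample via searchsorted, mode 1 interpolates linearly and casts back to the original dtype.

import numpy as np

t = np.array([0.0, 1.0, 2.0, 3.0])
samples = np.array([10, 20, 30, 40])               # integer channel
new_t = np.array([0.5, 1.5, 2.5])

# mode 0: repeat the previous sample
idx = np.clip(np.searchsorted(t, new_t, side="right") - 1, 0, len(t) - 1)
print(samples[idx])                                 # [10 20 30]

# mode 1: linear interpolation, cast back to the integer dtype
print(np.interp(new_t, t, samples).astype(samples.dtype))   # [15 25 35]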
#vtb
def set_sequence_from_str(self, sequence):
self._qsequences = [QKeySequence(s) for s in sequence.split()]
self.update_warning()
|
This is a convenience method to set the new QKeySequence of the
shortcut editor from a string.
|
### Input:
This is a convenience method to set the new QKeySequence of the
shortcut editor from a string.
### Response:
#vtb
def set_sequence_from_str(self, sequence):
self._qsequences = [QKeySequence(s) for s in sequence.split()]
self.update_warning()
|
#vtb
def restore(self):
sys = set(self._sys_modules.keys())
for mod_name in sys.difference(self._saved_modules):
del self._sys_modules[mod_name]
|
Unloads all modules that weren't loaded when save_modules was called.
|
### Input:
Unloads all modules that weren't loaded when save_modules was called.
### Response:
#vtb
def restore(self):
sys = set(self._sys_modules.keys())
for mod_name in sys.difference(self._saved_modules):
del self._sys_modules[mod_name]
|
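The same snapshot-and-restore idea against the real `sys.modules` (the entry above works on a wrapped `self._sys_modules` and a `_saved_modules` snapshot taken earlier):

import sys

saved = set(sys.modules)          # snapshot before loading anything extra
import csv                        # stand-in for a module loaded afterwards

for name in set(sys.modules) - saved:
    del sys.modules[name]         # unload everything imported since the snapshot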
#vtb
def unload_extension(self, module_str):
if module_str in sys.modules:
mod = sys.modules[module_str]
self._call_unload_ipython_extension(mod)
|
Unload an IPython extension by its module name.
This function looks up the extension's name in ``sys.modules`` and
simply calls ``mod.unload_ipython_extension(self)``.
|
### Input:
Unload an IPython extension by its module name.
This function looks up the extension's name in ``sys.modules`` and
simply calls ``mod.unload_ipython_extension(self)``.
### Response:
#vtb
def unload_extension(self, module_str):
if module_str in sys.modules:
mod = sys.modules[module_str]
self._call_unload_ipython_extension(mod)
|
#vtb
def list_data_links(self, instance):
response = self.get_proto(path= + instance)
message = rest_pb2.ListLinkInfoResponse()
message.ParseFromString(response.content)
links = getattr(message, )
return iter([Link(link) for link in links])
|
Lists the data links visible to this client.
Data links are returned in random order.
:param str instance: A Yamcs instance name.
:rtype: ~collections.Iterable[.Link]
|
### Input:
Lists the data links visible to this client.
Data links are returned in random order.
:param str instance: A Yamcs instance name.
:rtype: ~collections.Iterable[.Link]
### Response:
#vtb
def list_data_links(self, instance):
response = self.get_proto(path= + instance)
message = rest_pb2.ListLinkInfoResponse()
message.ParseFromString(response.content)
links = getattr(message, )
return iter([Link(link) for link in links])
|
#vtb
def set_type_by_schema(self, schema_obj, schema_type):
schema_id = self._get_object_schema_id(schema_obj, schema_type)
if not self.storage.contains(schema_id):
schema = self.storage.create_schema(
schema_obj, self.name, schema_type, root=self.root)
assert schema.schema_id == schema_id
self._type = schema_id
|
Set property type by schema object
The schema will be created if it doesn't exist in the collection
:param dict schema_obj: raw schema object
:param str schema_type:
|
### Input:
Set property type by schema object
The schema will be created if it doesn't exist in the collection
:param dict schema_obj: raw schema object
:param str schema_type:
### Response:
#vtb
def set_type_by_schema(self, schema_obj, schema_type):
schema_id = self._get_object_schema_id(schema_obj, schema_type)
if not self.storage.contains(schema_id):
schema = self.storage.create_schema(
schema_obj, self.name, schema_type, root=self.root)
assert schema.schema_id == schema_id
self._type = schema_id
|
#vtb
def with_metaclass(meta, *bases):
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass("NewBase", None, {})
|
Create a base class with a metaclass.
For example, if you have the metaclass
>>> class Meta(type):
... pass
Use this as the metaclass by doing
>>> from symengine.compatibility import with_metaclass
>>> class MyClass(with_metaclass(Meta, object)):
... pass
This is equivalent to the Python 2::
class MyClass(object):
__metaclass__ = Meta
or Python 3::
class MyClass(object, metaclass=Meta):
pass
That is, the first argument is the metaclass, and the remaining arguments
are the base classes. Note that if the base class is just ``object``, you
may omit it.
>>> MyClass.__mro__
(<class 'MyClass'>, <... 'object'>)
>>> type(MyClass)
<class 'Meta'>
|
### Input:
Create a base class with a metaclass.
For example, if you have the metaclass
>>> class Meta(type):
... pass
Use this as the metaclass by doing
>>> from symengine.compatibility import with_metaclass
>>> class MyClass(with_metaclass(Meta, object)):
... pass
This is equivalent to the Python 2::
class MyClass(object):
__metaclass__ = Meta
or Python 3::
class MyClass(object, metaclass=Meta):
pass
That is, the first argument is the metaclass, and the remaining arguments
are the base classes. Note that if the base class is just ``object``, you
may omit it.
>>> MyClass.__mro__
(<class 'MyClass'>, <... 'object'>)
>>> type(MyClass)
<class 'Meta'>
### Response:
#vtb
def with_metaclass(meta, *bases):
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass("NewBase", None, {})
|
#vtb
def _get_text(self):
boxes = self.boxes
txt = []
for line in boxes:
txt_line = u""
for box in line.word_boxes:
txt_line += u" " + box.content
txt.append(txt_line)
return txt
|
Get the text corresponding to this page
|
### Input:
Get the text corresponding to this page
### Response:
#vtb
def _get_text(self):
boxes = self.boxes
txt = []
for line in boxes:
txt_line = u""
for box in line.word_boxes:
txt_line += u" " + box.content
txt.append(txt_line)
return txt
|
#vtb
def sense_ttb(self, target):
return super(Device, self).sense_ttb(target, did=b)
|
Activate the RF field and probe for a Type B Target.
The RC-S956 can discover Type B Targets (Type 4B Tag) at 106
kbps. For a Type 4B Tag the firmware automatically sends an
ATTRIB command that configures the use of DID and 64 byte
maximum frame size. The driver reverts this configuration with
a DESELECT and WUPB command to return the target prepared for
activation (which nfcpy does in the tag activation code).
|
### Input:
Activate the RF field and probe for a Type B Target.
The RC-S956 can discover Type B Targets (Type 4B Tag) at 106
kbps. For a Type 4B Tag the firmware automatically sends an
ATTRIB command that configures the use of DID and 64 byte
maximum frame size. The driver reverts this configuration with
a DESELECT and WUPB command to return the target prepared for
activation (which nfcpy does in the tag activation code).
### Response:
#vtb
def sense_ttb(self, target):
return super(Device, self).sense_ttb(target, did=b)
|
#vtb
def _process_messages(self, messages):
if self._shuttingdown:
return
if not messages:
proc_block_size = sys.maxsize
if self.auto_commit_every_n:
proc_block_size = self.auto_commit_every_n
msgs_to_proc = messages[:proc_block_size]
msgs_remainder = messages[proc_block_size:]
last_offset = msgs_to_proc[-1].offset
self._processor_d = d = maybeDeferred(self.processor, self, msgs_to_proc)
log.debug(, d, last_offset)
d.addBoth(self._clear_processor_deferred)
d.addCallback(self._update_processed_offset, last_offset)
if self._stopping or self._start_d is None:
d.cancel()
else:
d.addCallback(lambda _: self._process_messages(msgs_remainder))
d.addErrback(self._handle_processor_error)
|
Send messages to the `processor` callback to be processed
If we have a commit policy, we send messages to the processor
in blocks no bigger than auto_commit_every_n (if set). Otherwise, we
send the entire message block to be processed.
|
### Input:
Send messages to the `processor` callback to be processed
If we have a commit policy, we send messages to the processor
in blocks no bigger than auto_commit_every_n (if set). Otherwise, we
send the entire message block to be processed.
### Response:
#vtb
def _process_messages(self, messages):
if self._shuttingdown:
return
if not messages:
proc_block_size = sys.maxsize
if self.auto_commit_every_n:
proc_block_size = self.auto_commit_every_n
msgs_to_proc = messages[:proc_block_size]
msgs_remainder = messages[proc_block_size:]
last_offset = msgs_to_proc[-1].offset
self._processor_d = d = maybeDeferred(self.processor, self, msgs_to_proc)
log.debug(, d, last_offset)
d.addBoth(self._clear_processor_deferred)
d.addCallback(self._update_processed_offset, last_offset)
if self._stopping or self._start_d is None:
d.cancel()
else:
d.addCallback(lambda _: self._process_messages(msgs_remainder))
d.addErrback(self._handle_processor_error)
|
#vtb
def json(self, dict=False, **kwargs):
try:
graph = self.graph
except AttributeError:
raise NotImplementedError()
return _netjson_networkgraph(self.protocol,
self.version,
self.revision,
self.metric,
graph.nodes(data=True),
graph.edges(data=True),
dict,
**kwargs)
|
Outputs NetJSON format
|
### Input:
Outputs NetJSON format
### Response:
#vtb
def json(self, dict=False, **kwargs):
try:
graph = self.graph
except AttributeError:
raise NotImplementedError()
return _netjson_networkgraph(self.protocol,
self.version,
self.revision,
self.metric,
graph.nodes(data=True),
graph.edges(data=True),
dict,
**kwargs)
|
#vtb
def derive(self, modifier):
def forward(value):
changed_value = modifier(value)
derived.fire(changed_value)
derived = Event()
self.add_callback(forward)
return derived
|
Returns a new :class:`Event` instance that will fire
when this event fires. The value passed to the callbacks
to the new event is the return value of the given
`modifier` function which is passed the original value.
|
### Input:
Returns a new :class:`Event` instance that will fire
when this event fires. The value passed to the callbacks
to the new event is the return value of the given
`modifier` function which is passed the original value.
### Response:
#vtb
def derive(self, modifier):
def forward(value):
changed_value = modifier(value)
derived.fire(changed_value)
derived = Event()
self.add_callback(forward)
return derived
|
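A self-contained toy Event class showing the derive pattern described above; only add_callback and fire are assumed, everything else about the real class is unknown here.

class Event:
    def __init__(self):
        self._callbacks = []

    def add_callback(self, callback):
        self._callbacks.append(callback)

    def fire(self, value):
        for callback in self._callbacks:
            callback(value)

    def derive(self, modifier):
        # New event fires with the modified value whenever this one fires.
        derived = Event()
        self.add_callback(lambda value: derived.fire(modifier(value)))
        return derived


celsius = Event()
fahrenheit = celsius.derive(lambda c: c * 9 / 5 + 32)
fahrenheit.add_callback(print)
celsius.fire(100)   # prints 212.0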
#vtb
def verify_client_id(self):
from .models import Client
from .exceptions.invalid_client import ClientDoesNotExist
from .exceptions.invalid_request import ClientNotProvided
if self.client_id:
try:
self.client = Client.objects.for_id(self.client_id)
except (Client.DoesNotExist, ValueError):
raise ClientDoesNotExist()
else:
raise ClientNotProvided()
|
Verify a provided client id against the database and set the `Client` object that is
associated with it to `self.client`.
TODO: Document all of the thrown exceptions.
|
### Input:
Verify a provided client id against the database and set the `Client` object that is
associated with it to `self.client`.
TODO: Document all of the thrown exceptions.
### Response:
#vtb
def verify_client_id(self):
from .models import Client
from .exceptions.invalid_client import ClientDoesNotExist
from .exceptions.invalid_request import ClientNotProvided
if self.client_id:
try:
self.client = Client.objects.for_id(self.client_id)
except (Client.DoesNotExist, ValueError):
raise ClientDoesNotExist()
else:
raise ClientNotProvided()
|
#vtb
def contains(self, key, counter_id):
with self._lock:
return counter_id in self._metadata[key]
|
Return whether a counter_id is present for a given instance key.
If the key is not in the cache, raises a KeyError.
|
### Input:
Return whether a counter_id is present for a given instance key.
If the key is not in the cache, raises a KeyError.
### Response:
#vtb
def contains(self, key, counter_id):
with self._lock:
return counter_id in self._metadata[key]
|
#vtb
def get_label(self,callb=None):
if self.label is None:
mypartial=partial(self.resp_set_label)
if callb:
mycallb=lambda x,y:(mypartial(y),callb(x,y))
else:
mycallb=lambda x,y:mypartial(y)
response = self.req_with_resp(GetLabel, StateLabel, callb=mycallb )
return self.label
|
Convenience method to request the label from the device
This method will check whether the value has already been retrieved from the device,
if so, it will simply return it. If not, it will request the information from the device
and request that callb be executed when a response is received. The default callback
will simply cache the value.
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:returns: The cached value
:rtype: str
|
### Input:
Convenience method to request the label from the device
This method will check whether the value has already been retrieved from the device,
if so, it will simply return it. If not, it will request the information from the device
and request that callb be executed when a response is received. The default callback
will simply cache the value.
:param callb: Callable to be used when the response is received. If not set,
self.resp_set_label will be used.
:type callb: callable
:returns: The cached value
:rtype: str
### Response:
#vtb
def get_label(self,callb=None):
if self.label is None:
mypartial=partial(self.resp_set_label)
if callb:
mycallb=lambda x,y:(mypartial(y),callb(x,y))
else:
mycallb=lambda x,y:mypartial(y)
response = self.req_with_resp(GetLabel, StateLabel, callb=mycallb )
return self.label
|
#vtb
def pkcs7_unpad(data):
if isinstance(data, str):
return data[0:-ord(data[-1])]
else:
return data[0:-data[-1]]
|
Remove the padding bytes that were added at point of encryption.
Implementation copied from pyaspora:
https://github.com/mjnovice/pyaspora/blob/master/pyaspora/diaspora/protocol.py#L209
|
### Input:
Remove the padding bytes that were added at point of encryption.
Implementation copied from pyaspora:
https://github.com/mjnovice/pyaspora/blob/master/pyaspora/diaspora/protocol.py#L209
### Response:
#vtb
def pkcs7_unpad(data):
if isinstance(data, str):
return data[0:-ord(data[-1])]
else:
return data[0:-data[-1]]
|
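Round-trip illustration of the bytes branch above: pad with PKCS#7, then drop the last `data[-1]` bytes. The pad helper is only here for the demo and is not part of the original entry.

def pkcs7_pad(data, block_size=16):
    pad = block_size - len(data) % block_size
    return data + bytes([pad]) * pad


padded = pkcs7_pad(b"attack at dawn")   # 14 bytes -> two b'\x02' bytes appended
print(padded[0:-padded[-1]])            # b'attack at dawn'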
#vtb
def export(name,
target=None,
rev=None,
user=None,
username=None,
password=None,
force=False,
overwrite=False,
externals=True,
trust=False,
trust_failures=None):
s --trust-server-cert
trust_failures : None
Comma-separated list of certificate trust failures, that shall be
ignored. This can be used if trust=True is not sufficient. The
specified string is passed to SVN
ret = {: name, : True, : , : {}}
if not target:
return _fail(ret, )
svn_cmd =
cwd, basename = os.path.split(target)
opts = tuple()
if not overwrite and os.path.exists(target) and not os.path.isdir(target):
return _fail(ret,
.format(target)
)
if __opts__[]:
if not os.path.exists(target):
return _neutral_test(
ret,
(t exist and is set to be checked out.svn.listHEAD{0}HEAD--force--ignore-externals--trust-server-cert--trust-server-cert-failureschangesnewchangescomment was Exported to ' + target
return ret
|
Export a file or directory from an SVN repository
name
Address and path to the file or directory to be exported.
target
Name of the target directory where the checkout will put the working
directory
rev : None
The name revision number to checkout. Enable "force" if the directory
already exists.
user : None
Name of the user performing repository management operations
username : None
The user to access the name repository with. The svn default is the
current user
password
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
force : False
Continue if conflicts are encountered
overwrite : False
Overwrite existing target
externals : True
Change to False to not checkout or update externals
trust : False
Automatically trust the remote server. SVN's --trust-server-cert
trust_failures : None
Comma-separated list of certificate trust failures, that shall be
ignored. This can be used if trust=True is not sufficient. The
specified string is passed to SVN's --trust-server-cert-failures
option as-is.
.. versionadded:: 2019.2.0
|
### Input:
Export a file or directory from an SVN repository
name
Address and path to the file or directory to be exported.
target
Name of the target directory where the checkout will put the working
directory
rev : None
The name revision number to checkout. Enable "force" if the directory
already exists.
user : None
Name of the user performing repository management operations
username : None
The user to access the name repository with. The svn default is the
current user
password
Connect to the Subversion server with this password
.. versionadded:: 0.17.0
force : False
Continue if conflicts are encountered
overwrite : False
Overwrite existing target
externals : True
Change to False to not checkout or update externals
trust : False
Automatically trust the remote server. SVN's --trust-server-cert
trust_failures : None
Comma-separated list of certificate trust failures, that shall be
ignored. This can be used if trust=True is not sufficient. The
specified string is passed to SVN's --trust-server-cert-failures
option as-is.
.. versionadded:: 2019.2.0
### Response:
#vtb
def export(name,
target=None,
rev=None,
user=None,
username=None,
password=None,
force=False,
overwrite=False,
externals=True,
trust=False,
trust_failures=None):
s --trust-server-cert
trust_failures : None
Comma-separated list of certificate trust failures, that shall be
ignored. This can be used if trust=True is not sufficient. The
specified string is passed to SVN
ret = {: name, : True, : , : {}}
if not target:
return _fail(ret, )
svn_cmd =
cwd, basename = os.path.split(target)
opts = tuple()
if not overwrite and os.path.exists(target) and not os.path.isdir(target):
return _fail(ret,
.format(target)
)
if __opts__[]:
if not os.path.exists(target):
return _neutral_test(
ret,
(t exist and is set to be checked out.svn.listHEAD{0}HEAD--force--ignore-externals--trust-server-cert--trust-server-cert-failureschangesnewchangescomment was Exported to ' + target
return ret
|
#vtb
def escape(url):
if salt.utils.platform.is_windows():
return url
scheme = urlparse(url).scheme
if not scheme:
if url.startswith():
return url
else:
return .format(url)
elif scheme == :
path, saltenv = parse(url)
if path.startswith():
return create(path, saltenv)
else:
return create(.format(path), saltenv)
else:
return url
|
add escape character `|` to `url`
|
### Input:
add escape character `|` to `url`
### Response:
#vtb
def escape(url):
if salt.utils.platform.is_windows():
return url
scheme = urlparse(url).scheme
if not scheme:
if url.startswith():
return url
else:
return .format(url)
elif scheme == :
path, saltenv = parse(url)
if path.startswith():
return create(path, saltenv)
else:
return create(.format(path), saltenv)
else:
return url
|
#vtb
def get_memory_map_xml(self):
root = ElementTree.Element()
for r in self._context.core.memory_map:
prop.text = hex(r.blocksize).rstrip("L")
return MAP_XML_HEADER + ElementTree.tostring(root)
|
! @brief Generate GDB memory map XML.
|
### Input:
! @brief Generate GDB memory map XML.
### Response:
#vtb
def get_memory_map_xml(self):
root = ElementTree.Element()
for r in self._context.core.memory_map:
prop.text = hex(r.blocksize).rstrip("L")
return MAP_XML_HEADER + ElementTree.tostring(root)
|
#vtb
def add_aggregated_lv_components(network, components):
generators = {}
loads = {}
for lv_grid in network.mv_grid.lv_grids:
generators.setdefault(lv_grid, {})
for gen in lv_grid.generators:
generators[lv_grid].setdefault(gen.type, {})
generators[lv_grid][gen.type].setdefault(gen.subtype, {})
generators[lv_grid][gen.type][gen.subtype].setdefault(
, 0)
generators[lv_grid][gen.type][gen.subtype][
] += gen.nominal_capacity
generators[lv_grid][gen.type][gen.subtype].setdefault(
,
.join([gen.type,
gen.subtype,
,
,
str(lv_grid.id)]))
loads.setdefault(lv_grid, {})
for lo in lv_grid.graph.nodes_by_attribute():
for sector, val in lo.consumption.items():
loads[lv_grid].setdefault(sector, 0)
loads[lv_grid][sector] += val
generator = {: [],
: [],
: [],
: [],
: []}
load = {: [], : []}
for lv_grid_obj, lv_grid in generators.items():
for _, gen_type in lv_grid.items():
for _, gen_subtype in gen_type.items():
generator[].append(gen_subtype[])
generator[].append(
.join([, lv_grid_obj.station.__repr__()]))
generator[].append()
generator[].append(gen_subtype[])
generator[].append("")
for lv_grid_obj, lv_grid in loads.items():
for sector, val in lv_grid.items():
load[].append(.join([, sector, repr(lv_grid_obj)]))
load[].append(
.join([, lv_grid_obj.station.__repr__()]))
components[] = pd.concat(
[components[], pd.DataFrame(generator).set_index()])
components[] = pd.concat(
[components[], pd.DataFrame(load).set_index()])
return components
|
Aggregates LV load and generation at LV stations
Use this function if you aim for MV calculation only. The corresponding
DataFrames of `components` are extended by loads and generators that
represent the aggregated LV components, grouped by technology type.
Parameters
----------
network : Network
The eDisGo grid topology model overall container
components : dict of :pandas:`pandas.DataFrame<dataframe>`
PyPSA components in tabular format
Returns
-------
:obj:`dict` of :pandas:`pandas.DataFrame<dataframe>`
The dictionary components passed to the function is returned altered.
|
### Input:
Aggregates LV load and generation at LV stations
Use this function if you aim for MV calculation only. The corresponding
DataFrames of `components` are extended by loads and generators that
represent the aggregated LV components, grouped by technology type.
Parameters
----------
network : Network
The eDisGo grid topology model overall container
components : dict of :pandas:`pandas.DataFrame<dataframe>`
PyPSA components in tabular format
Returns
-------
:obj:`dict` of :pandas:`pandas.DataFrame<dataframe>`
The dictionary components passed to the function is returned altered.
### Response:
#vtb
def add_aggregated_lv_components(network, components):
generators = {}
loads = {}
for lv_grid in network.mv_grid.lv_grids:
generators.setdefault(lv_grid, {})
for gen in lv_grid.generators:
generators[lv_grid].setdefault(gen.type, {})
generators[lv_grid][gen.type].setdefault(gen.subtype, {})
generators[lv_grid][gen.type][gen.subtype].setdefault(
, 0)
generators[lv_grid][gen.type][gen.subtype][
] += gen.nominal_capacity
generators[lv_grid][gen.type][gen.subtype].setdefault(
,
.join([gen.type,
gen.subtype,
,
,
str(lv_grid.id)]))
loads.setdefault(lv_grid, {})
for lo in lv_grid.graph.nodes_by_attribute():
for sector, val in lo.consumption.items():
loads[lv_grid].setdefault(sector, 0)
loads[lv_grid][sector] += val
generator = {: [],
: [],
: [],
: [],
: []}
load = {: [], : []}
for lv_grid_obj, lv_grid in generators.items():
for _, gen_type in lv_grid.items():
for _, gen_subtype in gen_type.items():
generator[].append(gen_subtype[])
generator[].append(
.join([, lv_grid_obj.station.__repr__()]))
generator[].append()
generator[].append(gen_subtype[])
generator[].append("")
for lv_grid_obj, lv_grid in loads.items():
for sector, val in lv_grid.items():
load[].append(.join([, sector, repr(lv_grid_obj)]))
load[].append(
.join([, lv_grid_obj.station.__repr__()]))
components[] = pd.concat(
[components[], pd.DataFrame(generator).set_index()])
components[] = pd.concat(
[components[], pd.DataFrame(load).set_index()])
return components
|
#vtb
def assign_taxonomy(
data, min_confidence=0.80, output_fp=None, training_data_fp=None,
fixrank=True, max_memory=None, tmp_dir=tempfile.gettempdir()):
data = list(data)
for line in app_result[]:
excep = parse_rdp_exception(line)
if excep is not None:
_, rdp_id = excep
orig_id = seq_id_lookup[rdp_id]
assignments[orig_id] = (, 1.0)
for line in app_result[]:
rdp_id, direction, taxa = parse_rdp_assignment(line)
if taxa[0][0] == "Root":
taxa = taxa[1:]
orig_id = seq_id_lookup[rdp_id]
lineage, confidence = get_rdp_lineage(taxa, min_confidence)
if lineage:
assignments[orig_id] = (.join(lineage), confidence)
else:
assignments[orig_id] = (, 1.0)
if output_fp:
try:
output_file = open(output_fp, )
except OSError:
raise OSError("Can%s\t%s\t%1.3f\n' % (seq_id, lineage, confidence))
output_file.close()
return None
else:
return assignments
|
Assign taxonomy to each sequence in data with the RDP classifier
data: open fasta file object or list of fasta lines
confidence: minimum support threshold to assign taxonomy to a sequence
output_fp: path to write output; if not provided, result will be
returned in a dict of {seq_id:(taxonomy_assignment,confidence)}
|
### Input:
Assign taxonomy to each sequence in data with the RDP classifier
data: open fasta file object or list of fasta lines
confidence: minimum support threshold to assign taxonomy to a sequence
output_fp: path to write output; if not provided, result will be
returned in a dict of {seq_id:(taxonomy_assignment,confidence)}
### Response:
#vtb
def assign_taxonomy(
data, min_confidence=0.80, output_fp=None, training_data_fp=None,
fixrank=True, max_memory=None, tmp_dir=tempfile.gettempdir()):
data = list(data)
for line in app_result[]:
excep = parse_rdp_exception(line)
if excep is not None:
_, rdp_id = excep
orig_id = seq_id_lookup[rdp_id]
assignments[orig_id] = (, 1.0)
for line in app_result[]:
rdp_id, direction, taxa = parse_rdp_assignment(line)
if taxa[0][0] == "Root":
taxa = taxa[1:]
orig_id = seq_id_lookup[rdp_id]
lineage, confidence = get_rdp_lineage(taxa, min_confidence)
if lineage:
assignments[orig_id] = (.join(lineage), confidence)
else:
assignments[orig_id] = (, 1.0)
if output_fp:
try:
output_file = open(output_fp, )
except OSError:
raise OSError("Can%s\t%s\t%1.3f\n' % (seq_id, lineage, confidence))
output_file.close()
return None
else:
return assignments
|
#vtb
def setup(self):
self.log.info()
if not os.path.exists(self.pathToWorkspace):
os.makedirs(self.pathToWorkspace)
if not os.path.exists(self.pathToWorkspace + "/qubits_output"):
os.makedirs(self.pathToWorkspace + "/qubits_output")
spectralDB = os.path.dirname(
__file__) + "/resources/qubits_spectral_database"
qubitsSettings = os.path.dirname(
__file__) + "/resources/qubits_settings.yaml"
dstSettings = self.pathToWorkspace + "/qubits_settings.yaml"
if os.path.exists(self.pathToWorkspace + "/qubits_spectral_database") or os.path.exists(dstSettings):
self.log.warning(
"A qubits workspace seems to already exist in this location")
sys.exit(0)
shutil.copytree(spectralDB, self.pathToWorkspace +
"/qubits_spectral_database")
shutil.copyfile(qubitsSettings, dstSettings)
return None
|
*setup the workspace in the requested location*
**Return:**
- ``None``
|
### Input:
*setup the workspace in the requested location*
**Return:**
- ``None``
### Response:
#vtb
def setup(self):
self.log.info()
if not os.path.exists(self.pathToWorkspace):
os.makedirs(self.pathToWorkspace)
if not os.path.exists(self.pathToWorkspace + "/qubits_output"):
os.makedirs(self.pathToWorkspace + "/qubits_output")
spectralDB = os.path.dirname(
__file__) + "/resources/qubits_spectral_database"
qubitsSettings = os.path.dirname(
__file__) + "/resources/qubits_settings.yaml"
dstSettings = self.pathToWorkspace + "/qubits_settings.yaml"
if os.path.exists(self.pathToWorkspace + "/qubits_spectral_database") or os.path.exists(dstSettings):
self.log.warning(
"A qubits workspace seems to already exist in this location")
sys.exit(0)
shutil.copytree(spectralDB, self.pathToWorkspace +
"/qubits_spectral_database")
shutil.copyfile(qubitsSettings, dstSettings)
return None
|
#vtb
def delete(self, id):
lt = meta.Session.query(LayerTemplate).get(id)
if lt is None:
abort(404)
meta.Session.delete(lt)
meta.Session.commit()
|
DELETE /layertemplates/id: Delete an existing item.
|
### Input:
DELETE /layertemplates/id: Delete an existing item.
### Response:
#vtb
def delete(self, id):
lt = meta.Session.query(LayerTemplate).get(id)
if lt is None:
abort(404)
meta.Session.delete(lt)
meta.Session.commit()
|
#vtb
def update(did):
required_attributes = [, , , , , ,
]
required_metadata_base_attributes = [, , , ,
, , , ]
required_metadata_curation_attributes = [, ]
assert isinstance(request.json, dict),
data = request.json
if not data:
logger.error(f)
return 400
msg, status = check_required_attributes(required_attributes, data, )
if msg:
return msg, status
msg, status = check_required_attributes(required_metadata_base_attributes,
_get_base_metadata(data[]), )
if msg:
return msg, status
msg, status = check_required_attributes(required_metadata_curation_attributes,
_get_curation_metadata(data[]), )
if msg:
return msg, status
msg, status = check_no_urls_in_files(_get_base_metadata(data[]), )
if msg:
return msg, status
msg, status = validate_date_format(data[])
if msg:
return msg, status
_record = dict()
_record = copy.deepcopy(data)
_record[] = datetime.strptime(data[], )
try:
if dao.get(did) is None:
register()
return _sanitize_record(_record), 201
else:
for service in _record[]:
service_id = int(service[])
if service[] == :
_record[][service_id][][][] = _get_date(
dao.get(did)[])
dao.update(_record, did)
return Response(_sanitize_record(_record), 200, content_type=)
except Exception as err:
return f, 500
|
Update DDO of an existing asset
---
tags:
- ddo
consumes:
- application/json
parameters:
- in: body
name: body
required: true
description: DDO of the asset.
schema:
type: object
required:
- "@context"
- created
- id
- publicKey
- authentication
- proof
- service
properties:
"@context":
description:
example: https://w3id.org/future-method/v1
type: string
id:
description: ID of the asset.
example: did:op:123456789abcdefghi
type: string
created:
description: date of ddo creation.
example: "2016-02-08T16:02:20Z"
type: string
publicKey:
type: array
description: List of public keys.
example: [{"id": "did:op:123456789abcdefghi#keys-1"},
{"type": "Ed25519VerificationKey2018"},
{"owner": "did:op:123456789abcdefghi"},
{"publicKeyBase58": "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV"}]
authentication:
type: array
description: List of authentication mechanisms.
example: [{"type": "RsaSignatureAuthentication2018"},
{"publicKey": "did:op:123456789abcdefghi#keys-1"}]
proof:
type: dictionary
description: Information about the creation and creator of the asset.
example: {"type": "UUIDSignature",
"created": "2016-02-08T16:02:20Z",
"creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja",
"signatureValue": "QNB13Y7Q9...1tzjn4w=="
}
service:
type: array
description: List of services.
example: [{"type": "Access",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${
pubKey}&serviceId={serviceId}&url={url}"},
{"type": "Compute",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${
pubKey}&serviceId={serviceId}&algo={algo}&container={container}"},
{
"type": "Metadata",
"serviceDefinitionId": "2",
"serviceEndpoint":
"http://myaquarius.org/api/v1/provider/assets/metadata/{did}",
"metadata": {
"base": {
"name": "UK Weather information 2011",
"type": "dataset",
"description": "Weather information of UK including
temperature and humidity",
"dateCreated": "2012-02-01T10:55:11Z",
"author": "Met Office",
"license": "CC-BY",
"copyrightHolder": "Met Office",
"compression": "zip",
"workExample": "stationId,latitude,longitude,datetime,
temperature,humidity/n423432fsd,51.509865,-0.118092,
2011-01-01T10:55:11+00:00,7.2,68",
"files": [{
"contentLength": "4535431",
"contentType": "text/csv",
"encoding": "UTF-8",
"compression": "zip",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932"
}
],
"encryptedFiles": "0x098213xzckasdf089723hjgdasfkjgasfv",
"links": [{
"name": "Sample of Asset Data",
"type": "sample",
"url": "https://foo.com/sample.csv"
},
{
"name": "Data Format Definition",
"type": "format",
"AssetID":
"4d517500da0acb0d65a716f61330969334630363ce4a6a9d39691026ac7908ea"
}
],
"inLanguage": "en",
"tags": "weather, uk, 2011, temperature, humidity",
"price": 10,
"checksum":
"38803b9e6f04fce3fba4b124524672592264d31847182c689095a081c9e85262"
},
"curation": {
"rating": 0.93,
"numVotes": 123,
"schema": "Binary Voting"
},
"additionalInformation": {
"updateFrecuency": "yearly",
"structuredMarkup": [{
"uri": "http://skos.um.es/unescothes/C01194/jsonld",
"mediaType": "application/ld+json"
},
{
"uri": "http://skos.um.es/unescothes/C01194/turtle",
"mediaType": "text/turtle"
}
]
}
}
}]
responses:
200:
description: Asset successfully updated.
201:
description: Asset successfully registered.
400:
description: One of the required attributes is missing.
404:
description: Invalid asset data.
500:
description: Error
|
### Input:
Update DDO of an existing asset
---
tags:
- ddo
consumes:
- application/json
parameters:
- in: body
name: body
required: true
description: DDO of the asset.
schema:
type: object
required:
- "@context"
- created
- id
- publicKey
- authentication
- proof
- service
properties:
"@context":
description:
example: https://w3id.org/future-method/v1
type: string
id:
description: ID of the asset.
example: did:op:123456789abcdefghi
type: string
created:
description: date of ddo creation.
example: "2016-02-08T16:02:20Z"
type: string
publicKey:
type: array
description: List of public keys.
example: [{"id": "did:op:123456789abcdefghi#keys-1"},
{"type": "Ed25519VerificationKey2018"},
{"owner": "did:op:123456789abcdefghi"},
{"publicKeyBase58": "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV"}]
authentication:
type: array
description: List of authentication mechanisms.
example: [{"type": "RsaSignatureAuthentication2018"},
{"publicKey": "did:op:123456789abcdefghi#keys-1"}]
proof:
type: dictionary
description: Information about the creation and creator of the asset.
example: {"type": "UUIDSignature",
"created": "2016-02-08T16:02:20Z",
"creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja",
"signatureValue": "QNB13Y7Q9...1tzjn4w=="
}
service:
type: array
description: List of services.
example: [{"type": "Access",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${
pubKey}&serviceId={serviceId}&url={url}"},
{"type": "Compute",
"serviceEndpoint":
"http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${
pubKey}&serviceId={serviceId}&algo={algo}&container={container}"},
{
"type": "Metadata",
"serviceDefinitionId": "2",
"serviceEndpoint":
"http://myaquarius.org/api/v1/provider/assets/metadata/{did}",
"metadata": {
"base": {
"name": "UK Weather information 2011",
"type": "dataset",
"description": "Weather information of UK including
temperature and humidity",
"dateCreated": "2012-02-01T10:55:11Z",
"author": "Met Office",
"license": "CC-BY",
"copyrightHolder": "Met Office",
"compression": "zip",
"workExample": "stationId,latitude,longitude,datetime,
temperature,humidity/n423432fsd,51.509865,-0.118092,
2011-01-01T10:55:11+00:00,7.2,68",
"files": [{
"contentLength": "4535431",
"contentType": "text/csv",
"encoding": "UTF-8",
"compression": "zip",
"resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932"
}
],
"encryptedFiles": "0x098213xzckasdf089723hjgdasfkjgasfv",
"links": [{
"name": "Sample of Asset Data",
"type": "sample",
"url": "https://foo.com/sample.csv"
},
{
"name": "Data Format Definition",
"type": "format",
"AssetID":
"4d517500da0acb0d65a716f61330969334630363ce4a6a9d39691026ac7908ea"
}
],
"inLanguage": "en",
"tags": "weather, uk, 2011, temperature, humidity",
"price": 10,
"checksum":
"38803b9e6f04fce3fba4b124524672592264d31847182c689095a081c9e85262"
},
"curation": {
"rating": 0.93,
"numVotes": 123,
"schema": "Binary Voting"
},
"additionalInformation": {
"updateFrecuency": "yearly",
"structuredMarkup": [{
"uri": "http://skos.um.es/unescothes/C01194/jsonld",
"mediaType": "application/ld+json"
},
{
"uri": "http://skos.um.es/unescothes/C01194/turtle",
"mediaType": "text/turtle"
}
]
}
}
}]
responses:
200:
description: Asset successfully updated.
201:
description: Asset successfully registered.
400:
description: One of the required attributes is missing.
404:
description: Invalid asset data.
500:
description: Error
### Response:
#vtb
def update(did):
required_attributes = [, , , , , ,
]
required_metadata_base_attributes = [, , , ,
, , , ]
required_metadata_curation_attributes = [, ]
assert isinstance(request.json, dict),
data = request.json
if not data:
logger.error(f)
return 400
msg, status = check_required_attributes(required_attributes, data, )
if msg:
return msg, status
msg, status = check_required_attributes(required_metadata_base_attributes,
_get_base_metadata(data[]), )
if msg:
return msg, status
msg, status = check_required_attributes(required_metadata_curation_attributes,
_get_curation_metadata(data[]), )
if msg:
return msg, status
msg, status = check_no_urls_in_files(_get_base_metadata(data[]), )
if msg:
return msg, status
msg, status = validate_date_format(data[])
if msg:
return msg, status
_record = dict()
_record = copy.deepcopy(data)
_record[] = datetime.strptime(data[], )
try:
if dao.get(did) is None:
register()
return _sanitize_record(_record), 201
else:
for service in _record[]:
service_id = int(service[])
if service[] == :
_record[][service_id][][][] = _get_date(
dao.get(did)[])
dao.update(_record, did)
return Response(_sanitize_record(_record), 200, content_type=)
except Exception as err:
return f, 500
|
#vtb
def flatten_list(multiply_list):
if isinstance(multiply_list, list):
return [rv for l in multiply_list for rv in flatten_list(l)]
else:
return [multiply_list]
|
Flatten a nested list::
    >>> a = [1, 2, [3, 4], [[5, 6], [7, 8]]]
    >>> flatten_list(a)
    [1, 2, 3, 4, 5, 6, 7, 8]
:param multiply_list: an arbitrarily nested list
:return: a flat, single-level list
|
### Input:
Flatten a nested list::
    >>> a = [1, 2, [3, 4], [[5, 6], [7, 8]]]
    >>> flatten_list(a)
    [1, 2, 3, 4, 5, 6, 7, 8]
:param multiply_list: an arbitrarily nested list
:return: a flat, single-level list
### Response:
#vtb
def flatten_list(multiply_list):
if isinstance(multiply_list, list):
return [rv for l in multiply_list for rv in flatten_list(l)]
else:
return [multiply_list]
|
#vtb
def get_go2sectiontxt(self):
go2txt = {}
_get_secs = self.hdrobj.get_sections
hdrgo2sectxt = {h:" ".join(_get_secs(h)) for h in self.get_hdrgos()}
usrgo2hdrgo = self.get_usrgo2hdrgo()
for goid, ntgo in self.go2nt.items():
hdrgo = ntgo.GO if ntgo.is_hdrgo else usrgo2hdrgo[ntgo.GO]
go2txt[goid] = hdrgo2sectxt[hdrgo]
return go2txt
|
Return a dict with actual header and user GO IDs as keys and their sections as values.
|
### Input:
Return a dict with actual header and user GO IDs as keys and their sections as values.
### Response:
#vtb
def get_go2sectiontxt(self):
go2txt = {}
_get_secs = self.hdrobj.get_sections
hdrgo2sectxt = {h:" ".join(_get_secs(h)) for h in self.get_hdrgos()}
usrgo2hdrgo = self.get_usrgo2hdrgo()
for goid, ntgo in self.go2nt.items():
hdrgo = ntgo.GO if ntgo.is_hdrgo else usrgo2hdrgo[ntgo.GO]
go2txt[goid] = hdrgo2sectxt[hdrgo]
return go2txt
|
#vtb
def get_fields(self, field_verbose=True, value_verbose=True, fields=[], extra_fields=[], remove_fields = []):
field_list = []
for field in self.__class__._meta.fields:
if field.name in remove_fields:
continue
if fields and field.name not in fields:
continue
if field.verbose_name and field_verbose:
value_tuple = (field.verbose_name, self.get_field_value(field, value_verbose))
else:
value_tuple = (field.name, self.get_field_value(field, value_verbose))
field_list.append(value_tuple)
for name in extra_fields:
method = getattr(self, name)
result = method()
value_tuple = (name, result)
field_list.append(value_tuple)
return field_list
|
Return a list of (field name, value) tuples.
field_verbose: if True, use the verbose_name defined on each field; if False, use its name
value_verbose: if True, return the display value (choice values are converted to their labels); if False, return the raw stored value
fields: only the listed fields are included
extra_fields: names of non-field members (e.g. methods) to call and include in the result
remove_fields: fields to exclude from the result
|
### Input:
Return a list of (field name, value) tuples.
field_verbose: if True, use the verbose_name defined on each field; if False, use its name
value_verbose: if True, return the display value (choice values are converted to their labels); if False, return the raw stored value
fields: only the listed fields are included
extra_fields: names of non-field members (e.g. methods) to call and include in the result
remove_fields: fields to exclude from the result
### Response:
#vtb
def get_fields(self, field_verbose=True, value_verbose=True, fields=[], extra_fields=[], remove_fields = []):
field_list = []
for field in self.__class__._meta.fields:
if field.name in remove_fields:
continue
if fields and field.name not in fields:
continue
if field.verbose_name and field_verbose:
value_tuple = (field.verbose_name, self.get_field_value(field, value_verbose))
else:
value_tuple = (field.name, self.get_field_value(field, value_verbose))
field_list.append(value_tuple)
for name in extra_fields:
method = getattr(self, name)
result = method()
value_tuple = (name, result)
field_list.append(value_tuple)
return field_list
|
#vtb
def rowCount(self, index=QModelIndex()):
if self.total_rows <= self.rows_loaded:
return self.total_rows
else:
return self.rows_loaded
|
Array row number
|
### Input:
Array row number
### Response:
#vtb
def rowCount(self, index=QModelIndex()):
if self.total_rows <= self.rows_loaded:
return self.total_rows
else:
return self.rows_loaded
|
#vtb
def _archive_entry_year(self, category):
" Return ARCHIVE_ENTRY_YEAR from settings (if exists) or year of the newest object in category "
year = getattr(settings, , None)
if not year:
n = now()
try:
year = Listing.objects.filter(
category__site__id=settings.SITE_ID,
category__tree_path__startswith=category.tree_path,
publish_from__lte=n
).values()[0][].year
except:
year = n.year
return year
|
Return ARCHIVE_ENTRY_YEAR from settings (if exists) or year of the newest object in category
|
### Input:
Return ARCHIVE_ENTRY_YEAR from settings (if exists) or year of the newest object in category
### Response:
#vtb
def _archive_entry_year(self, category):
" Return ARCHIVE_ENTRY_YEAR from settings (if exists) or year of the newest object in category "
year = getattr(settings, , None)
if not year:
n = now()
try:
year = Listing.objects.filter(
category__site__id=settings.SITE_ID,
category__tree_path__startswith=category.tree_path,
publish_from__lte=n
).values()[0][].year
except:
year = n.year
return year
|
#vtb
def list_bookmarks(self, start_date=None, end_date=None, limit=None):
query = Search(
using=self.client,
index=self.aggregation_alias,
doc_type=self.bookmark_doc_type
).sort({: {: }})
range_args = {}
if start_date:
range_args[] = self._format_range_dt(
start_date.replace(microsecond=0))
if end_date:
range_args[] = self._format_range_dt(
end_date.replace(microsecond=0))
if range_args:
query = query.filter(, date=range_args)
return query[0:limit].execute() if limit else query.scan()
|
List the aggregation's bookmarks.
|
### Input:
List the aggregation's bookmarks.
### Response:
#vtb
def list_bookmarks(self, start_date=None, end_date=None, limit=None):
query = Search(
using=self.client,
index=self.aggregation_alias,
doc_type=self.bookmark_doc_type
).sort({: {: }})
range_args = {}
if start_date:
range_args[] = self._format_range_dt(
start_date.replace(microsecond=0))
if end_date:
range_args[] = self._format_range_dt(
end_date.replace(microsecond=0))
if range_args:
query = query.filter(, date=range_args)
return query[0:limit].execute() if limit else query.scan()
|
#vtb
def UpdateHuntObject(self, hunt_id, start_time=None, **kwargs):
hunt_obj = self.ReadHuntObject(hunt_id)
delta_suffix = "_delta"
for k, v in kwargs.items():
if v is None:
continue
if k.endswith(delta_suffix):
key = k[:-len(delta_suffix)]
current_value = getattr(hunt_obj, key)
setattr(hunt_obj, key, current_value + v)
else:
setattr(hunt_obj, k, v)
if start_time is not None:
hunt_obj.init_start_time = hunt_obj.init_start_time or start_time
hunt_obj.last_start_time = start_time
hunt_obj.last_update_time = rdfvalue.RDFDatetime.Now()
self.hunts[hunt_obj.hunt_id] = hunt_obj
|
Updates the hunt object by applying the update function.
|
### Input:
Updates the hunt object by applying the update function.
### Response:
#vtb
def UpdateHuntObject(self, hunt_id, start_time=None, **kwargs):
hunt_obj = self.ReadHuntObject(hunt_id)
delta_suffix = "_delta"
for k, v in kwargs.items():
if v is None:
continue
if k.endswith(delta_suffix):
key = k[:-len(delta_suffix)]
current_value = getattr(hunt_obj, key)
setattr(hunt_obj, key, current_value + v)
else:
setattr(hunt_obj, k, v)
if start_time is not None:
hunt_obj.init_start_time = hunt_obj.init_start_time or start_time
hunt_obj.last_start_time = start_time
hunt_obj.last_update_time = rdfvalue.RDFDatetime.Now()
self.hunts[hunt_obj.hunt_id] = hunt_obj
|
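The "_delta" suffix convention above (kwargs ending in "_delta" increment the stored value, all other kwargs overwrite it) can be illustrated with a small standalone sketch; the Counter class and field names here are made up and not part of the original code.
# Hedged sketch of the "_delta" kwargs convention: keys ending in "_delta" add to
# the current attribute value, every other key overwrites it. Counter is made up.
class Counter:
    def __init__(self):
        self.num_clients = 0
        self.description = ""

def apply_updates(obj, **kwargs):
    for key, value in kwargs.items():
        if value is None:
            continue
        if key.endswith("_delta"):
            base = key[:-len("_delta")]
            setattr(obj, base, getattr(obj, base) + value)   # increment
        else:
            setattr(obj, key, value)                         # overwrite

c = Counter()
apply_updates(c, num_clients_delta=3, description="running")
assert c.num_clients == 3 and c.description == "running"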
#vtb
def action_delete(self, courseid, taskid, path):
path = path.strip()
if not path.startswith("/"):
path = "/" + path
wanted_path = self.verify_path(courseid, taskid, path)
if wanted_path is None:
return self.show_tab_file(courseid, taskid, _("Internal error"))
if "/" == wanted_path:
return self.show_tab_file(courseid, taskid, _("Internal error"))
try:
self.task_factory.get_task_fs(courseid, taskid).delete(wanted_path)
return self.show_tab_file(courseid, taskid)
except:
return self.show_tab_file(courseid, taskid, _("An error occurred while deleting the files"))
|
Delete a file or a directory
|
### Input:
Delete a file or a directory
### Response:
#vtb
def action_delete(self, courseid, taskid, path):
path = path.strip()
if not path.startswith("/"):
path = "/" + path
wanted_path = self.verify_path(courseid, taskid, path)
if wanted_path is None:
return self.show_tab_file(courseid, taskid, _("Internal error"))
if "/" == wanted_path:
return self.show_tab_file(courseid, taskid, _("Internal error"))
try:
self.task_factory.get_task_fs(courseid, taskid).delete(wanted_path)
return self.show_tab_file(courseid, taskid)
except:
return self.show_tab_file(courseid, taskid, _("An error occurred while deleting the files"))
|
#vtb
def _try_to_get_extension(obj):
if is_path(obj):
path = obj
elif is_path_obj(obj):
return obj.suffix[1:]
elif is_file_stream(obj):
try:
path = get_path_from_stream(obj)
except ValueError:
return None
elif is_ioinfo(obj):
path = obj.path
else:
return None
if path:
return get_file_extension(path)
return None
|
Try to get file extension from given path or file object.
:param obj: a file, file-like object or something
:return: File extension or None
>>> _try_to_get_extension("a.py")
'py'
|
### Input:
Try to get file extension from given path or file object.
:param obj: a file, file-like object or something
:return: File extension or None
>>> _try_to_get_extension("a.py")
'py'
### Response:
#vtb
def _try_to_get_extension(obj):
if is_path(obj):
path = obj
elif is_path_obj(obj):
return obj.suffix[1:]
elif is_file_stream(obj):
try:
path = get_path_from_stream(obj)
except ValueError:
return None
elif is_ioinfo(obj):
path = obj.path
else:
return None
if path:
return get_file_extension(path)
return None
|
#vtb
def vlm_add_input(self, psz_name, psz_input):
    '''Add a media's input MRL. This will add the specified one.
    @param psz_name: the media to work on.
    @param psz_input: the input MRL.
    @return: 0 on success, -1 on error.
    '''
return libvlc_vlm_add_input(self, str_to_bytes(psz_name), str_to_bytes(psz_input))
|
Add a media's input MRL. This will add the specified one.
@param psz_name: the media to work on.
@param psz_input: the input MRL.
@return: 0 on success, -1 on error.
|
### Input:
Add a media's input MRL. This will add the specified one.
@param psz_name: the media to work on.
@param psz_input: the input MRL.
@return: 0 on success, -1 on error.
### Response:
#vtb
def vlm_add_input(self, psz_name, psz_input):
    '''Add a media's input MRL. This will add the specified one.
    @param psz_name: the media to work on.
    @param psz_input: the input MRL.
    @return: 0 on success, -1 on error.
    '''
return libvlc_vlm_add_input(self, str_to_bytes(psz_name), str_to_bytes(psz_input))
|
#vtb
def IsPropertyInMetaIgnoreCase(classId, key):
if classId in _ManagedObjectMeta:
for prop in _ManagedObjectMeta[classId]:
if (prop.lower() == key.lower()):
return _ManagedObjectMeta[classId][prop]
if classId in _MethodFactoryMeta:
for prop in _MethodFactoryMeta[classId]:
if (prop.lower() == key.lower()):
return _MethodFactoryMeta[classId][prop]
return None
|
Returns the property meta of the provided key for the given classId. The given key is case-insensitive.
|
### Input:
Returns the property meta of the provided key for the given classId. The given key is case-insensitive.
### Response:
#vtb
def IsPropertyInMetaIgnoreCase(classId, key):
if classId in _ManagedObjectMeta:
for prop in _ManagedObjectMeta[classId]:
if (prop.lower() == key.lower()):
return _ManagedObjectMeta[classId][prop]
if classId in _MethodFactoryMeta:
for prop in _MethodFactoryMeta[classId]:
if (prop.lower() == key.lower()):
return _MethodFactoryMeta[classId][prop]
return None
|
#vtb
def get_hmac(self, key):
h = HMAC.new(key, None, SHA256)
h.update(self.iv)
h.update(str(self.chunks).encode())
h.update(self.f_key)
h.update(self.alpha_key)
h.update(str(self.encrypted).encode())
return h.digest()
|
Returns the keyed HMAC for authentication of this state data.
:param key: the key for the keyed hash function
|
### Input:
Returns the keyed HMAC for authentication of this state data.
:param key: the key for the keyed hash function
### Response:
#vtb
def get_hmac(self, key):
h = HMAC.new(key, None, SHA256)
h.update(self.iv)
h.update(str(self.chunks).encode())
h.update(self.f_key)
h.update(self.alpha_key)
h.update(str(self.encrypted).encode())
return h.digest()
|
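A hedged, standard-library parallel to the keyed HMAC above: the same state fields are fed to one HMAC-SHA256 through successive update() calls, using Python's hmac and hashlib instead of the Crypto HMAC class; the sample key and field values are dummies.
# Hedged standard-library parallel: authenticate several state fields with one
# keyed HMAC-SHA256 (field names mirror the snippet; key and values are dummies).
import hashlib
import hmac

def state_hmac(key, iv, chunks, f_key, alpha_key, encrypted):
    h = hmac.new(key, digestmod=hashlib.sha256)
    h.update(iv)                       # bytes
    h.update(str(chunks).encode())     # int serialized as text
    h.update(f_key)
    h.update(alpha_key)
    h.update(str(encrypted).encode())  # bool serialized as text
    return h.digest()

tag = state_hmac(b"secret-key", b"\x00" * 16, 4, b"fk", b"ak", True)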
#vtb
def paragraph(node):
text =
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph(, .join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o
|
Process a paragraph, which includes all content under it
|
### Input:
Process a paragraph, which includes all content under it
### Response:
#vtb
def paragraph(node):
text =
if node.string_content is not None:
text = node.string_content
o = nodes.paragraph(, .join(text))
o.line = node.sourcepos[0][0]
for n in MarkDown(node):
o.append(n)
return o
|
#vtb
def AddMethod(obj, function, name=None):
if name is None:
name = function.__name__
else:
function = RenameFunction(function, name)
if hasattr(obj, ) and obj.__class__ is not type:
if sys.version_info[:2] > (3, 2):
method = MethodType(function, obj)
else:
method = MethodType(function, obj, obj.__class__)
else:
method = function
setattr(obj, name, method)
|
Adds either a bound method to an instance or the function itself (or an unbound method in Python 2) to a class.
If name is omitted, the name of the specified function
is used by default.
Example::
a = A()
def f(self, x, y):
self.z = x + y
AddMethod(f, A, "add")
a.add(2, 4)
print(a.z)
AddMethod(lambda self, i: self.l[i], a, "listIndex")
print(a.listIndex(5))
|
### Input:
Adds either a bound method to an instance or the function itself (or an unbound method in Python 2) to a class.
If name is omitted, the name of the specified function
is used by default.
Example::
a = A()
def f(self, x, y):
self.z = x + y
AddMethod(f, A, "add")
a.add(2, 4)
print(a.z)
AddMethod(lambda self, i: self.l[i], a, "listIndex")
print(a.listIndex(5))
### Response:
#vtb
def AddMethod(obj, function, name=None):
if name is None:
name = function.__name__
else:
function = RenameFunction(function, name)
if hasattr(obj, ) and obj.__class__ is not type:
if sys.version_info[:2] > (3, 2):
method = MethodType(function, obj)
else:
method = MethodType(function, obj, obj.__class__)
else:
method = function
setattr(obj, name, method)
|
#vtb
def NHot(n, *xs, simplify=True):
if not isinstance(n, int):
raise TypeError("expected n to be an int")
if not 0 <= n <= len(xs):
fstr = "expected 0 <= n <= {}, got {}"
raise ValueError(fstr.format(len(xs), n))
xs = [Expression.box(x).node for x in xs]
num = len(xs)
terms = list()
for hot_idxs in itertools.combinations(range(num), n):
hot_idxs = set(hot_idxs)
_xs = [xs[i] if i in hot_idxs else exprnode.not_(xs[i])
for i in range(num)]
terms.append(exprnode.and_(*_xs))
y = exprnode.or_(*terms)
if simplify:
y = y.simplify()
return _expr(y)
|
Return an expression that means
"exactly N input functions are true".
If *simplify* is ``True``, return a simplified expression.
|
### Input:
Return an expression that means
"exactly N input functions are true".
If *simplify* is ``True``, return a simplified expression.
### Response:
#vtb
def NHot(n, *xs, simplify=True):
if not isinstance(n, int):
raise TypeError("expected n to be an int")
if not 0 <= n <= len(xs):
fstr = "expected 0 <= n <= {}, got {}"
raise ValueError(fstr.format(len(xs), n))
xs = [Expression.box(x).node for x in xs]
num = len(xs)
terms = list()
for hot_idxs in itertools.combinations(range(num), n):
hot_idxs = set(hot_idxs)
_xs = [xs[i] if i in hot_idxs else exprnode.not_(xs[i])
for i in range(num)]
terms.append(exprnode.and_(*_xs))
y = exprnode.or_(*terms)
if simplify:
y = y.simplify()
return _expr(y)
|
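The construction above enumerates every way of choosing which n inputs are the "hot" ones and ORs the corresponding AND terms. A plain-Python predicate built the same way, assuming ordinary booleans rather than expression nodes, is handy for brute-force sanity checks on small cases.
# Hedged plain-Python predicate for "exactly n of the inputs are true", built the
# same way: OR over every choice of which n inputs are hot.
import itertools

def n_hot(n, *bits):
    num = len(bits)
    return any(
        all(bits[i] if i in set(hot) else not bits[i] for i in range(num))
        for hot in itertools.combinations(range(num), n)
    )

assert n_hot(2, True, True, False) is True
assert n_hot(2, True, True, True) is False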
#vtb
def run(self, key, value, num_alts):
field_info = self.header.get_info_field_info(key)
if not isinstance(value, list):
return
TABLE = {
".": len(value),
"A": num_alts,
"R": num_alts + 1,
"G": binomial(num_alts + 1, 2),
}
expected = TABLE.get(field_info.number, field_info.number)
if len(value) != expected:
tpl = "Number of elements for INFO field {} is {} instead of {}"
warnings.warn(
tpl.format(key, len(value), field_info.number), exceptions.IncorrectListLength
)
|
Check value in INFO[key] of record
Currently, only checks for consistent counts are implemented
:param str key: key of INFO entry to check
:param value: value to check
:param int alts: list of alternative alleles, for length
|
### Input:
Check value in INFO[key] of record
Currently, only checks for consistent counts are implemented
:param str key: key of INFO entry to check
:param value: value to check
:param int alts: list of alternative alleles, for length
### Response:
#vtb
def run(self, key, value, num_alts):
field_info = self.header.get_info_field_info(key)
if not isinstance(value, list):
return
TABLE = {
".": len(value),
"A": num_alts,
"R": num_alts + 1,
"G": binomial(num_alts + 1, 2),
}
expected = TABLE.get(field_info.number, field_info.number)
if len(value) != expected:
tpl = "Number of elements for INFO field {} is {} instead of {}"
warnings.warn(
tpl.format(key, len(value), field_info.number), exceptions.IncorrectListLength
)
|
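The expected-count table mirrors the usual VCF Number conventions: "A" means one value per alternate allele, "R" adds the reference allele, and "G" follows the snippet's binomial(num_alts + 1, 2). A small sketch, assuming that binomial() is the ordinary binomial coefficient (that helper is not shown above, so this is an assumption):
# Hedged sketch, assuming binomial(n, k) above is the ordinary binomial coefficient
# (math.comb).
from math import comb

def expected_info_count(number, value, num_alts):
    table = {
        ".": len(value),          # '.': any length is accepted
        "A": num_alts,            # one value per alternate allele
        "R": num_alts + 1,        # alternate alleles plus the reference
        "G": comb(num_alts + 1, 2),
    }
    return table.get(number, number)

# an 'R'-numbered field with two ALT alleles should carry three values:
assert expected_info_count("R", [1, 2, 3], num_alts=2) == 3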
#vtb
def resolve_upload_path(self, filename=None):
if filename is None:
return constants.UPLOAD_VOLUME
return os.path.join(constants.UPLOAD_VOLUME, filename)
|
Resolve upload path for use with the executor.
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given uploaded file in programs executed using this
executor
|
### Input:
Resolve upload path for use with the executor.
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given uploaded file in programs executed using this
executor
### Response:
#vtb
def resolve_upload_path(self, filename=None):
if filename is None:
return constants.UPLOAD_VOLUME
return os.path.join(constants.UPLOAD_VOLUME, filename)
|
#vtb
def _compute_e2_factor(self, imt, vs30):
e2 = np.zeros_like(vs30)
if imt.name == "PGV":
period = 1
elif imt.name == "PGA":
period = 0
else:
period = imt.period
if period < 0.35:
return e2
else:
idx = vs30 <= 1000
if period >= 0.35 and period <= 2.0:
e2[idx] = (-0.25 * np.log(vs30[idx] / 1000) *
np.log(period / 0.35))
elif period > 2.0:
e2[idx] = (-0.25 * np.log(vs30[idx] / 1000) *
np.log(2.0 / 0.35))
return e2
|
Compute and return e2 factor, equation 19, page 80.
|
### Input:
Compute and return e2 factor, equation 19, page 80.
### Response:
#vtb
def _compute_e2_factor(self, imt, vs30):
e2 = np.zeros_like(vs30)
if imt.name == "PGV":
period = 1
elif imt.name == "PGA":
period = 0
else:
period = imt.period
if period < 0.35:
return e2
else:
idx = vs30 <= 1000
if period >= 0.35 and period <= 2.0:
e2[idx] = (-0.25 * np.log(vs30[idx] / 1000) *
np.log(period / 0.35))
elif period > 2.0:
e2[idx] = (-0.25 * np.log(vs30[idx] / 1000) *
np.log(2.0 / 0.35))
return e2
|
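Transcribed directly from the code above (not from the paper's equation 19), the factor is zero for periods below 0.35 s or vs30 above 1000 m/s, scales with ln(vs30/1000) * ln(T/0.35) up to T = 2 s, and is held at the T = 2 s value beyond that. A scalar sketch that leaves out the PGA/PGV-to-period mapping:
# Hedged scalar transcription of the code above (an assumption that it captures
# the intended piecewise form; values are illustrative, not from the paper).
import math

def e2_scalar(period, vs30):
    if period < 0.35 or vs30 > 1000:
        return 0.0
    capped = min(period, 2.0)                 # constant beyond T = 2 s
    return -0.25 * math.log(vs30 / 1000.0) * math.log(capped / 0.35)

print(round(e2_scalar(1.0, 400.0), 4))        # positive, since ln(400/1000) < 0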
#vtb
def OnStartup(self):
last_request = self.transaction_log.Get()
if last_request:
status = rdf_flows.GrrStatus(
status=rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED,
error_message="Client killed during transaction")
if self.nanny_controller:
nanny_status = self.nanny_controller.GetNannyStatus()
if nanny_status:
status.nanny_status = nanny_status
self.SendReply(
status,
request_id=last_request.request_id,
response_id=1,
session_id=last_request.session_id,
message_type=rdf_flows.GrrMessage.Type.STATUS)
self.transaction_log.Clear()
action = admin.SendStartupInfo(grr_worker=self)
action.Run(None, ttl=1)
|
A handler that is called on client startup.
|
### Input:
A handler that is called on client startup.
### Response:
#vtb
def OnStartup(self):
last_request = self.transaction_log.Get()
if last_request:
status = rdf_flows.GrrStatus(
status=rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED,
error_message="Client killed during transaction")
if self.nanny_controller:
nanny_status = self.nanny_controller.GetNannyStatus()
if nanny_status:
status.nanny_status = nanny_status
self.SendReply(
status,
request_id=last_request.request_id,
response_id=1,
session_id=last_request.session_id,
message_type=rdf_flows.GrrMessage.Type.STATUS)
self.transaction_log.Clear()
action = admin.SendStartupInfo(grr_worker=self)
action.Run(None, ttl=1)
|
#vtb
def get_all_triggers(bump, file_triggers):
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers
|
Aggregated set of significant figures to bump
|
### Input:
Aggregated set of significant figures to bump
### Response:
#vtb
def get_all_triggers(bump, file_triggers):
triggers = set()
if file_triggers:
triggers = triggers.union(detect_file_triggers(config.trigger_patterns))
if bump:
_LOG.debug("trigger: %s bump requested", bump)
triggers.add(bump)
return triggers
|
#vtb
def apply_correlation(self, sites, imt, residuals, stddev_intra=0):
try:
corma = self.cache[imt]
except KeyError:
corma = self.get_lower_triangle_correlation_matrix(
sites.complete, imt)
self.cache[imt] = corma
if len(sites.complete) == len(sites):
return numpy.dot(corma, residuals)
return numpy.sum(corma[sites.sids, sid] * res
for sid, res in zip(sites.sids, residuals))
|
Apply correlation to randomly sampled residuals.
:param sites:
:class:`~openquake.hazardlib.site.SiteCollection` residuals were
sampled for.
:param imt:
Intensity measure type object, see :mod:`openquake.hazardlib.imt`.
:param residuals:
2d numpy array of sampled residuals, where first dimension
represents sites (the length as ``sites`` parameter) and
second one represents different realizations (samples).
:param stddev_intra:
Intra-event standard deviation array. Note that different sites do
not necessarily have the same intra-event standard deviation.
:returns:
Array of the same structure and semantics as ``residuals``
but with correlations applied.
NB: the correlation matrix is cached. It is computed only once
per IMT for the complete site collection and then the portion
corresponding to the sites is multiplied by the residuals.
|
### Input:
Apply correlation to randomly sampled residuals.
:param sites:
:class:`~openquake.hazardlib.site.SiteCollection` residuals were
sampled for.
:param imt:
Intensity measure type object, see :mod:`openquake.hazardlib.imt`.
:param residuals:
2d numpy array of sampled residuals, where first dimension
represents sites (the length as ``sites`` parameter) and
second one represents different realizations (samples).
:param stddev_intra:
Intra-event standard deviation array. Note that different sites do
not necessarily have the same intra-event standard deviation.
:returns:
Array of the same structure and semantics as ``residuals``
but with correlations applied.
NB: the correlation matrix is cached. It is computed only once
per IMT for the complete site collection and then the portion
corresponding to the sites is multiplied by the residuals.
### Response:
#vtb
def apply_correlation(self, sites, imt, residuals, stddev_intra=0):
try:
corma = self.cache[imt]
except KeyError:
corma = self.get_lower_triangle_correlation_matrix(
sites.complete, imt)
self.cache[imt] = corma
if len(sites.complete) == len(sites):
return numpy.dot(corma, residuals)
return numpy.sum(corma[sites.sids, sid] * res
for sid, res in zip(sites.sids, residuals))
|
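The core step above is a matrix product between a lower-triangular factor of the site-to-site correlation matrix and independently sampled residuals. A small numpy sketch, with a made-up 3-site correlation matrix and a Cholesky factor standing in for get_lower_triangle_correlation_matrix:
# Hedged numpy sketch: impose spatial correlation on i.i.d. residuals by multiplying
# with a lower-triangular factor; the 3-site correlation matrix below is made up.
import numpy as np

rng = np.random.default_rng(0)
corr = np.array([[1.0, 0.6, 0.3],
                 [0.6, 1.0, 0.6],
                 [0.3, 0.6, 1.0]])
corma = np.linalg.cholesky(corr)                 # lower-triangular factor
residuals = rng.standard_normal((3, 10000))      # sites x realizations, i.i.d.
correlated = corma @ residuals                   # same shape, now correlated
print(np.round(np.corrcoef(correlated), 2))      # empirical correlations ~ corr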
#vtb
def formatTime(self, record, datefmt=None):
if datefmt:
s = datetime.datetime.now().strftime(datefmt)
else:
t = datetime.datetime.now().strftime(self.default_time_format)
s = self.default_msec_format % (t, record.msecs)
return s
|
Overrides formatTime method to use datetime module instead of time module
to display time in microseconds. Time module by default does not resolve
time to microseconds.
|
### Input:
Overrides formatTime method to use datetime module instead of time module
to display time in microseconds. Time module by default does not resolve
time to microseconds.
### Response:
#vtb
def formatTime(self, record, datefmt=None):
if datefmt:
s = datetime.datetime.now().strftime(datefmt)
else:
t = datetime.datetime.now().strftime(self.default_time_format)
s = self.default_msec_format % (t, record.msecs)
return s
|
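A self-contained sketch of the same idea: a logging.Formatter whose formatTime goes through datetime so the %f directive (microseconds) is available. The class name and format strings are assumptions, not taken from the original module.
# Hedged, self-contained sketch; names and format strings are assumptions.
import datetime
import logging

class MicrosecondFormatter(logging.Formatter):
    def formatTime(self, record, datefmt=None):
        dt = datetime.datetime.fromtimestamp(record.created)
        return dt.strftime(datefmt or "%Y-%m-%d %H:%M:%S.%f")

handler = logging.StreamHandler()
handler.setFormatter(MicrosecondFormatter("%(asctime)s %(levelname)s %(message)s"))
logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.warning("timestamp now carries microseconds")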
#vtb
def _enrich_link(self, glossary):
try:
Model = apps.get_model(*glossary[][].split())
obj = Model.objects.get(pk=glossary[][])
glossary[].update(identifier=str(obj))
except (KeyError, ObjectDoesNotExist):
pass
|
Enrich the dict glossary['link'] with an identifier onto the model
|
### Input:
Enrich the dict glossary['link'] with an identifier onto the model
### Response:
#vtb
def _enrich_link(self, glossary):
try:
Model = apps.get_model(*glossary[][].split())
obj = Model.objects.get(pk=glossary[][])
glossary[].update(identifier=str(obj))
except (KeyError, ObjectDoesNotExist):
pass
|
#vtb
def datapoint_indices_for_tensor(self, tensor_index):
if tensor_index >= self._num_tensors:
raise ValueError( %(tensor_index, self._num_tensors))
return self._file_num_to_indices[tensor_index]
|
Returns the indices for all datapoints in the given tensor.
|
### Input:
Returns the indices for all datapoints in the given tensor.
### Response:
#vtb
def datapoint_indices_for_tensor(self, tensor_index):
if tensor_index >= self._num_tensors:
raise ValueError( %(tensor_index, self._num_tensors))
return self._file_num_to_indices[tensor_index]
|
#vtb
def _python_type(self, key, value):
try:
field_type = self._sp_cols[key][]
if field_type in [, ]:
return float(value)
elif field_type == :
value = self.date_format.search(value).group(0)
return datetime.strptime(value, )
elif field_type == :
if value == :
return
elif value == :
return
else:
return
elif field_type in (, ):
if value in self.users[]:
return self.users[][value]
elif in value:
return value.split()[1]
else:
return value
else:
return value
except AttributeError:
return value
|
Returns proper type from the schema
|
### Input:
Returns proper type from the schema
### Response:
#vtb
def _python_type(self, key, value):
try:
field_type = self._sp_cols[key][]
if field_type in [, ]:
return float(value)
elif field_type == :
value = self.date_format.search(value).group(0)
return datetime.strptime(value, )
elif field_type == :
if value == :
return
elif value == :
return
else:
return
elif field_type in (, ):
if value in self.users[]:
return self.users[][value]
elif in value:
return value.split()[1]
else:
return value
else:
return value
except AttributeError:
return value
|
#vtb
def readGif(filename, asNumpy=True):
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
if not os.path.isfile(filename):
raise IOError( + str(filename))
pilIm = PIL.Image.open(filename)
pilIm.seek(0)
images = []
try:
while True:
tmp = pilIm.convert()
a = np.asarray(tmp)
if len(a.shape) == 0:
raise MemoryError("Too little memory to convert PIL image to array")
images.append(a)
pilIm.seek(pilIm.tell() + 1)
except EOFError:
pass
if not asNumpy:
images2 = images
images = []
for im in images2:
images.append(PIL.Image.fromarray(im))
return images
|
readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
arrays, or, if asNumpy is false, a list of PIL images.
|
### Input:
readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
arrays, or, if asNumpy is false, a list of PIL images.
### Response:
#vtb
def readGif(filename, asNumpy=True):
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
if not os.path.isfile(filename):
raise IOError( + str(filename))
pilIm = PIL.Image.open(filename)
pilIm.seek(0)
images = []
try:
while True:
tmp = pilIm.convert()
a = np.asarray(tmp)
if len(a.shape) == 0:
raise MemoryError("Too little memory to convert PIL image to array")
images.append(a)
pilIm.seek(pilIm.tell() + 1)
except EOFError:
pass
if not asNumpy:
images2 = images
images = []
for im in images2:
images.append(PIL.Image.fromarray(im))
return images
|
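A possible usage sketch, stacking the returned frames into one array; "animation.gif" is a placeholder path, not from the original source.
# Hedged usage sketch; "animation.gif" is a placeholder path.
import numpy as np

frames = readGif("animation.gif", asNumpy=True)
volume = np.stack(frames)            # (n_frames, height, width[, channels])
print(volume.shape, volume.dtype)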
#vtb
def lookup_thread_id(self):
query_string = % (
self.topic, self.owner, self.realm)
cache_key = (self.owner, self.realm, self.topic)
result = self.lookup_cache_key(cache_key)
if result is not None:
my_req = self.raw_pull(result)
if my_req.status_code != 200:
result = None
elif my_req.json()[] != self.topic:
logging.debug()
result = None
else:
logging.debug(, str(result),
str(cache_key))
return result
data, dummy_hdr = self.raw_search(self.user, self.token, query_string)
if data[] == 1:
if data[][0][] == self.topic:
result = data[][0][]
else:
result = None
elif data[] > 1:
else:
result = None
self.update_cache_key(cache_key, result)
return result
|
Lookup thread id as required by CommentThread.lookup_thread_id.
This implementation will query GitHub with the required parameters
to try and find the topic for the owner, realm, topic, etc., specified
in init.
|
### Input:
Lookup thread id as required by CommentThread.lookup_thread_id.
This implementation will query GitHub with the required parameters
to try and find the topic for the owner, realm, topic, etc., specified
in init.
### Response:
#vtb
def lookup_thread_id(self):
query_string = % (
self.topic, self.owner, self.realm)
cache_key = (self.owner, self.realm, self.topic)
result = self.lookup_cache_key(cache_key)
if result is not None:
my_req = self.raw_pull(result)
if my_req.status_code != 200:
result = None
elif my_req.json()[] != self.topic:
logging.debug()
result = None
else:
logging.debug(, str(result),
str(cache_key))
return result
data, dummy_hdr = self.raw_search(self.user, self.token, query_string)
if data[] == 1:
if data[][0][] == self.topic:
result = data[][0][]
else:
result = None
elif data[] > 1:
else:
result = None
self.update_cache_key(cache_key, result)
return result
|
#vtb
def _concrete_instance(self, instance_doc):
if not isinstance(instance_doc, dict):
return None
try:
service = instance_doc[]
cls = self._service_class_map[service]
return cls(instance_document=instance_doc, instances=self)
except Exception as ex:
logger.exception(ex)
logger.error(
.format(instance_doc)
)
return None
|
Concretize an instance document.
:param dict instance_doc: A document describing an instance. Should come from the API.
:returns: A subclass of :py:class:`bases.BaseInstance`, or None.
:rtype: :py:class:`bases.BaseInstance`
|
### Input:
Concretize an instance document.
:param dict instance_doc: A document describing an instance. Should come from the API.
:returns: A subclass of :py:class:`bases.BaseInstance`, or None.
:rtype: :py:class:`bases.BaseInstance`
### Response:
#vtb
def _concrete_instance(self, instance_doc):
if not isinstance(instance_doc, dict):
return None
try:
service = instance_doc[]
cls = self._service_class_map[service]
return cls(instance_document=instance_doc, instances=self)
except Exception as ex:
logger.exception(ex)
logger.error(
.format(instance_doc)
)
return None
|
#vtb
def __write(self, s):
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
|
Write string s to the stream if a whole new block
is ready to be written.
|
### Input:
Write string s to the stream if a whole new block
is ready to be written.
### Response:
#vtb
def __write(self, s):
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
|
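The same block-buffering idea as a standalone sketch: bytes accumulate in a buffer and only whole fixed-size blocks are flushed to the underlying file object. The BlockWriter class and sizes here are hypothetical, not the original stream class.
# Hedged standalone sketch of the block-buffering idea. BlockWriter is hypothetical.
import io

class BlockWriter:
    def __init__(self, fileobj, bufsize=512):
        self.fileobj = fileobj
        self.bufsize = bufsize
        self.buf = b""

    def write(self, data):
        self.buf += data
        while len(self.buf) >= self.bufsize:            # a full block is ready
            self.fileobj.write(self.buf[:self.bufsize])
            self.buf = self.buf[self.bufsize:]

raw = io.BytesIO()
w = BlockWriter(raw, bufsize=4)
w.write(b"abcdefghij")       # flushes two 4-byte blocks, keeps b"ij" buffered
assert raw.getvalue() == b"abcdefgh" and w.buf == b"ij"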