| prompt (large_string, lengths 70 – 991k) | completion (large_string, lengths 0 – 1.02k) |
---|---|
<|file_name|>ToolbarPanel.cpp<|end_file_name|><|fim▁begin|>#include "ToolbarPanel.h"
#include "StagePanel.h"
#include "SelectSpritesOP.h"
#include "Context.h"
namespace coceditor
{
ToolbarPanel::ToolbarPanel(wxWindow* parent)
: ee::ToolbarPanel(parent, Context::Instance()->stage)
{
Context* context = Context::Instance();
// addChild(new ee::UniversalCMPT(this, wxT("paste"), context->stage,
// new ee::ArrangeSpriteOP<ee::SelectSpritesOP>(context->stage, context->stage)));
<|fim▁hole|> SetSizer(initLayout());
}
wxSizer* ToolbarPanel::initLayout()
{
wxBoxSizer* topSizer = new wxBoxSizer(wxVERTICAL);
topSizer->Add(initChildrenLayout());
return topSizer;
}
} // coceditor<|fim▁end|> | addChild(new ee::UniversalCMPT(this, wxT("paste"), context->stage,
new ee::ArrangeSpriteOP<SelectSpritesOP>(context->stage, context->stage, context->property)));
|
<|file_name|>Configuration.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher ([email protected])
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import numpy as np
from copy import deepcopy
import h5py
from .util import Signal
from .util.ImgCorrection import CbnCorrection, ObliqueAngleDetectorAbsorptionCorrection
from .util import Pattern
from .util.calc import convert_units
from . import ImgModel, CalibrationModel, MaskModel, PatternModel, BatchModel
from .CalibrationModel import DetectorModes
class Configuration(object):
"""
The configuration class contains a working combination of an ImgModel, PatternModel, MaskModel and CalibrationModel.
It handles the core data manipulation of Dioptas.
The management of multiple Configurations is done by the DioptasModel.
"""
def __init__(self, working_directories=None):
super(Configuration, self).__init__()
self.img_model = ImgModel()
self.mask_model = MaskModel()
self.calibration_model = CalibrationModel(self.img_model)
self.batch_model = BatchModel(self.calibration_model, self.mask_model)
self.pattern_model = PatternModel()
if working_directories is None:
self.working_directories = {'calibration': '', 'mask': '', 'image': os.path.expanduser("~"), 'pattern': '',
'overlay': '', 'phase': '', 'batch': os.path.expanduser("~")}
else:
self.working_directories = working_directories
self.use_mask = False
self.transparent_mask = False
self._integration_rad_points = None
self._integration_unit = '2th_deg'
self._oned_azimuth_range = None
self._cake_azimuth_points = 360
self._cake_azimuth_range = None
<|fim▁hole|> self.auto_save_integrated_pattern = False
self.integrated_patterns_file_formats = ['.xy']
self.cake_changed = Signal()
self._connect_signals()
def _connect_signals(self):
"""
Connects the img_changed signal to the corresponding functions.
"""
self.img_model.img_changed.connect(self.update_mask_dimension)
self.img_model.img_changed.connect(self.integrate_image_1d)
def integrate_image_1d(self):
"""
Integrates the image in the ImageModel to a Pattern. Will also automatically save the integrated pattern, if
auto_save_integrated_pattern is True.
"""
if self.calibration_model.is_calibrated:
if self.use_mask:
mask = self.mask_model.get_mask()
elif self.mask_model.roi is not None:
mask = self.mask_model.roi_mask
else:
mask = None
x, y = self.calibration_model.integrate_1d(azi_range=self.oned_azimuth_range, mask=mask, unit=self.integration_unit,
num_points=self.integration_rad_points)
self.pattern_model.set_pattern(x, y, self.img_model.filename, unit=self.integration_unit) #
if self.auto_save_integrated_pattern:
self._auto_save_patterns()
def integrate_image_2d(self):
"""
Integrates the image in the ImageModel to a Cake.
"""
if self.use_mask:
mask = self.mask_model.get_mask()
elif self.mask_model.roi is not None:
mask = self.mask_model.roi_mask
else:
mask = None
self.calibration_model.integrate_2d(mask=mask,
rad_points=self._integration_rad_points,
azimuth_points=self._cake_azimuth_points,
azimuth_range=self._cake_azimuth_range)
self.cake_changed.emit()
def save_pattern(self, filename=None, subtract_background=False):
"""
Saves the current integrated pattern. The format depends on the file ending. Possible file formats:
[*.xy, *.chi, *.dat, *.fxye]
:param filename: where to save the file
:param subtract_background: flag indicating whether the pattern should be saved with or without subtracted background
"""
if filename is None:
filename = self.img_model.filename
if filename.endswith('.xy'):
self.pattern_model.save_pattern(filename, header=self._create_xy_header(),
subtract_background=subtract_background)
elif filename.endswith('.fxye'):
self.pattern_model.save_pattern(filename, header=self._create_fxye_header(filename),
subtract_background=subtract_background)
else:
self.pattern_model.save_pattern(filename, subtract_background=subtract_background)
def save_background_pattern(self, filename=None):
"""
Saves the current fit background as a pattern. The format depends on the file ending. Possible file formats:
[*.xy, *.chi, *.dat, *.fxye]
"""
if filename is None:
filename = self.img_model.filename
if filename.endswith('.xy'):
self.pattern_model.save_auto_background_as_pattern(filename, header=self._create_xy_header())
elif filename.endswith('.fxye'):
self.pattern_model.save_auto_background_as_pattern(filename, header=self._create_fxye_header(filename))
else:
self.pattern_model.save_pattern(filename)
def _create_xy_header(self):
"""
Creates the header for the xy file format (contains information about calibration parameters).
:return: header string
"""
header = self.calibration_model.create_file_header()
header = header.replace('\r\n', '\n')
header = header + '\n#\n# ' + self._integration_unit + '\t I'
return header
def _create_fxye_header(self, filename):
"""
Creates the header for the fxye file format (used by GSAS and GSAS-II) containing the calibration information
:return: header string
"""
header = 'Generated file ' + filename + ' using DIOPTAS\n'
header = header + self.calibration_model.create_file_header()
unit = self._integration_unit
lam = self.calibration_model.wavelength
if unit == 'q_A^-1':
con = 'CONQ'
else:
con = 'CONS'
header = header + '\nBANK\t1\tNUM_POINTS\tNUM_POINTS ' + con + '\tMIN_X_VAL\tSTEP_X_VAL ' + \
'{0:.5g}'.format(lam * 1e10) + ' 0.0 FXYE'
return header
def _auto_save_patterns(self):
"""
Saves the current pattern in the pattern working directory (specified in self.working_directories['pattern']).
When background subtraction is enabled in the pattern model, the pattern is saved both without background
subtraction and, in a 'bkg_subtracted' sub-folder, with the background subtracted.
"""
for file_ending in self.integrated_patterns_file_formats:
filename = os.path.join(
self.working_directories['pattern'],
os.path.basename(str(self.img_model.filename)).split('.')[:-1][0] + file_ending)
filename = filename.replace('\\', '/')
self.save_pattern(filename)
if self.pattern_model.pattern.has_background():
for file_ending in self.integrated_patterns_file_formats:
directory = os.path.join(self.working_directories['pattern'], 'bkg_subtracted')
if not os.path.exists(directory):
os.mkdir(directory)
filename = os.path.join(directory, self.pattern_model.pattern.name + file_ending)
filename = filename.replace('\\', '/')
self.save_pattern(filename, subtract_background=True)
def update_mask_dimension(self):
"""
Updates the shape of the mask in the MaskModel to the shape of the image in the ImageModel.
"""
self.mask_model.set_dimension(self.img_model._img_data.shape)
@property
def integration_rad_points(self):
return self._integration_rad_points
@integration_rad_points.setter
def integration_rad_points(self, new_value):
self._integration_rad_points = new_value
self.integrate_image_1d()
if self.auto_integrate_cake:
self.integrate_image_2d()
@property
def cake_azimuth_points(self):
return self._cake_azimuth_points
@cake_azimuth_points.setter
def cake_azimuth_points(self, new_value):
self._cake_azimuth_points = new_value
if self.auto_integrate_cake:
self.integrate_image_2d()
@property
def cake_azimuth_range(self):
return self._cake_azimuth_range
@cake_azimuth_range.setter
def cake_azimuth_range(self, new_value):
self._cake_azimuth_range = new_value
if self.auto_integrate_cake:
self.integrate_image_2d()
@property
def oned_azimuth_range(self):
return self._oned_azimuth_range
@oned_azimuth_range.setter
def oned_azimuth_range(self, new_value):
self._oned_azimuth_range = new_value
if self.auto_integrate_pattern:
self.integrate_image_1d()
@property
def integration_unit(self):
return self._integration_unit
@integration_unit.setter
def integration_unit(self, new_unit):
old_unit = self.integration_unit
self._integration_unit = new_unit
auto_bg_subtraction = self.pattern_model.pattern.auto_background_subtraction
if auto_bg_subtraction:
self.pattern_model.pattern.auto_background_subtraction = False
self.integrate_image_1d()
self.update_auto_background_parameters_unit(old_unit, new_unit)
if auto_bg_subtraction:
self.pattern_model.pattern.auto_background_subtraction = True
self.pattern_model.pattern.recalculate_pattern()
self.pattern_model.pattern_changed.emit()
@property
def correct_solid_angle(self):
return self.calibration_model.correct_solid_angle
@correct_solid_angle.setter
def correct_solid_angle(self, new_val):
self.calibration_model.correct_solid_angle = new_val
if self.auto_integrate_pattern:
self.integrate_image_1d()
if self._auto_integrate_cake:
self.integrate_image_2d()
def update_auto_background_parameters_unit(self, old_unit, new_unit):
"""
This handles the changes for the auto background subtraction parameters in the PatternModel when the integration
unit is changed.
:param old_unit: possible values are '2th_deg', 'q_A^-1', 'd_A'
:param new_unit: possible values are '2th_deg', 'q_A^-1', 'd_A'
"""
par_0 = convert_units(self.pattern_model.pattern.auto_background_subtraction_parameters[0],
self.calibration_model.wavelength,
old_unit,
new_unit)
# A value of 0.1 lets the background subtraction algorithm work without crashing.
if np.isnan(par_0):
par_0 = 0.1
self.pattern_model.pattern.auto_background_subtraction_parameters = \
par_0, \
self.pattern_model.pattern.auto_background_subtraction_parameters[1], \
self.pattern_model.pattern.auto_background_subtraction_parameters[2]
if self.pattern_model.pattern.auto_background_subtraction_roi is not None:
self.pattern_model.pattern.auto_background_subtraction_roi = \
convert_units(self.pattern_model.pattern.auto_background_subtraction_roi[0],
self.calibration_model.wavelength,
old_unit,
new_unit), \
convert_units(self.pattern_model.pattern.auto_background_subtraction_roi[1],
self.calibration_model.wavelength,
old_unit,
new_unit)
@property
def auto_integrate_cake(self):
return self._auto_integrate_cake
@auto_integrate_cake.setter
def auto_integrate_cake(self, new_value):
if self._auto_integrate_cake == new_value:
return
self._auto_integrate_cake = new_value
if new_value:
self.img_model.img_changed.connect(self.integrate_image_2d)
else:
self.img_model.img_changed.disconnect(self.integrate_image_2d)
@property
def auto_integrate_pattern(self):
return self._auto_integrate_pattern
@auto_integrate_pattern.setter
def auto_integrate_pattern(self, new_value):
if self._auto_integrate_pattern == new_value:
return
self._auto_integrate_pattern = new_value
if new_value:
self.img_model.img_changed.connect(self.integrate_image_1d)
else:
self.img_model.img_changed.disconnect(self.integrate_image_1d)
@property
def cake_img(self):
return self.calibration_model.cake_img
@property
def roi(self):
return self.mask_model.roi
@roi.setter
def roi(self, new_val):
self.mask_model.roi = new_val
self.integrate_image_1d()
def copy(self):
"""
Creates a copy of the current configuration
:return: copied configuration
:rtype: Configuration
"""
new_configuration = Configuration(self.working_directories)
new_configuration.img_model._img_data = self.img_model._img_data
new_configuration.img_model.img_transformations = deepcopy(self.img_model.img_transformations)
new_configuration.calibration_model.set_pyFAI(self.calibration_model.get_calibration_parameter()[0])
new_configuration.integrate_image_1d()
return new_configuration
def save_in_hdf5(self, hdf5_group):
"""
Saves the configuration group in the given hdf5_group.
:type hdf5_group: h5py.Group
"""
f = hdf5_group
# save general information
general_information = f.create_group('general_information')
# integration parameters:
general_information.attrs['integration_unit'] = self.integration_unit
if self.integration_rad_points:
general_information.attrs['integration_num_points'] = self.integration_rad_points
else:
general_information.attrs['integration_num_points'] = 0
# cake parameters:
general_information.attrs['auto_integrate_cake'] = self.auto_integrate_cake
general_information.attrs['cake_azimuth_points'] = self.cake_azimuth_points
if self.cake_azimuth_range is None:
general_information.attrs['cake_azimuth_range'] = "None"
else:
general_information.attrs['cake_azimuth_range'] = self.cake_azimuth_range
# mask parameters
general_information.attrs['use_mask'] = self.use_mask
general_information.attrs['transparent_mask'] = self.transparent_mask
# auto save parameters
general_information.attrs['auto_save_integrated_pattern'] = self.auto_save_integrated_pattern
formats = [n.encode('ascii', 'ignore') for n in self.integrated_patterns_file_formats]
general_information.create_dataset('integrated_patterns_file_formats', (len(formats), 1), 'S10', formats)
# save working directories
working_directories_gp = f.create_group('working_directories')
try:
for key in self.working_directories:
working_directories_gp.attrs[key] = self.working_directories[key]
except TypeError:
self.working_directories = {'calibration': '', 'mask': '', 'image': '', 'pattern': '', 'overlay': '',
'phase': '', 'batch': ''}
for key in self.working_directories:
working_directories_gp.attrs[key] = self.working_directories[key]
# save image model
image_group = f.create_group('image_model')
image_group.attrs['auto_process'] = self.img_model.autoprocess
image_group.attrs['factor'] = self.img_model.factor
image_group.attrs['has_background'] = self.img_model.has_background()
image_group.attrs['background_filename'] = self.img_model.background_filename
image_group.attrs['background_offset'] = self.img_model.background_offset
image_group.attrs['background_scaling'] = self.img_model.background_scaling
if self.img_model.has_background():
background_data = self.img_model.untransformed_background_data
image_group.create_dataset('background_data', background_data.shape, 'f', background_data)
image_group.attrs['series_max'] = self.img_model.series_max
image_group.attrs['series_pos'] = self.img_model.series_pos
# image corrections
corrections_group = image_group.create_group('corrections')
corrections_group.attrs['has_corrections'] = self.img_model.has_corrections()
for correction, correction_object in self.img_model.img_corrections.corrections.items():
if correction in ['cbn', 'oiadac']:
correction_data = correction_object.get_data()
imcd = corrections_group.create_dataset(correction, correction_data.shape, 'f', correction_data)
for param, value in correction_object.get_params().items():
imcd.attrs[param] = value
elif correction == 'transfer':
params = correction_object.get_params()
transfer_group = corrections_group.create_group('transfer')
original_data = params['original_data']
response_data = params['response_data']
original_ds = transfer_group.create_dataset('original_data', original_data.shape, 'f', original_data)
original_ds.attrs['filename'] = params['original_filename']
response_ds = transfer_group.create_dataset('response_data', response_data.shape, 'f', response_data)
response_ds.attrs['filename'] = params['response_filename']
# the actual image
image_group.attrs['filename'] = self.img_model.filename
current_raw_image = self.img_model.untransformed_raw_img_data
raw_image_data = image_group.create_dataset('raw_image_data', current_raw_image.shape, dtype='f')
raw_image_data[...] = current_raw_image
# image transformations
transformations_group = image_group.create_group('image_transformations')
for ind, transformation in enumerate(self.img_model.get_transformations_string_list()):
transformations_group.attrs[str(ind)] = transformation
# save roi data
if self.roi is not None:
image_group.attrs['has_roi'] = True
image_group.create_dataset('roi', (4,), 'i8', tuple(self.roi))
else:
image_group.attrs['has_roi'] = False
# save mask model
mask_group = f.create_group('mask')
current_mask = self.mask_model.get_mask()
mask_data = mask_group.create_dataset('data', current_mask.shape, dtype=bool)
mask_data[...] = current_mask
# save detector information
detector_group = f.create_group('detector')
detector_mode = self.calibration_model.detector_mode
detector_group.attrs['detector_mode'] = detector_mode.value
if detector_mode == DetectorModes.PREDEFINED:
detector_group.attrs['detector_name'] = self.calibration_model.detector.name
elif detector_mode == DetectorModes.NEXUS:
detector_group.attrs['nexus_filename'] = self.calibration_model.detector.filename
# save calibration model
calibration_group = f.create_group('calibration_model')
calibration_filename = self.calibration_model.filename
if calibration_filename.endswith('.poni'):
base_filename, ext = self.calibration_model.filename.rsplit('.', 1)
else:
base_filename = self.calibration_model.filename
ext = 'poni'
calibration_group.attrs['calibration_filename'] = base_filename + '.' + ext
pyfai_param, fit2d_param = self.calibration_model.get_calibration_parameter()
pfp = calibration_group.create_group('pyfai_parameters')
for key in pyfai_param:
try:
pfp.attrs[key] = pyfai_param[key]
except TypeError:
pfp.attrs[key] = ''
calibration_group.attrs['correct_solid_angle'] = self.correct_solid_angle
if self.calibration_model.distortion_spline_filename is not None:
calibration_group.attrs['distortion_spline_filename'] = self.calibration_model.distortion_spline_filename
# save background pattern and pattern model
background_pattern_group = f.create_group('background_pattern')
try:
background_pattern_x = self.pattern_model.background_pattern.original_x
background_pattern_y = self.pattern_model.background_pattern.original_y
except (TypeError, AttributeError):
background_pattern_x = None
background_pattern_y = None
if background_pattern_x is not None and background_pattern_y is not None:
background_pattern_group.attrs['has_background_pattern'] = True
bgx = background_pattern_group.create_dataset('x', background_pattern_x.shape, dtype='f')
bgy = background_pattern_group.create_dataset('y', background_pattern_y.shape, dtype='f')
bgx[...] = background_pattern_x
bgy[...] = background_pattern_y
else:
background_pattern_group.attrs['has_background_pattern'] = False
pattern_group = f.create_group('pattern')
try:
pattern_x = self.pattern_model.pattern.original_x
pattern_y = self.pattern_model.pattern.original_y
except (TypeError, AttributeError):
pattern_x = None
pattern_y = None
if pattern_x is not None and pattern_y is not None:
px = pattern_group.create_dataset('x', pattern_x.shape, dtype='f')
py = pattern_group.create_dataset('y', pattern_y.shape, dtype='f')
px[...] = pattern_x
py[...] = pattern_y
pattern_group.attrs['pattern_filename'] = self.pattern_model.pattern_filename
pattern_group.attrs['unit'] = self.pattern_model.unit
pattern_group.attrs['file_iteration_mode'] = self.pattern_model.file_iteration_mode
if self.pattern_model.pattern.auto_background_subtraction:
pattern_group.attrs['auto_background_subtraction'] = True
auto_background_group = pattern_group.create_group('auto_background_settings')
auto_background_group.attrs['smoothing'] = \
self.pattern_model.pattern.auto_background_subtraction_parameters[0]
auto_background_group.attrs['iterations'] = \
self.pattern_model.pattern.auto_background_subtraction_parameters[1]
auto_background_group.attrs['poly_order'] = \
self.pattern_model.pattern.auto_background_subtraction_parameters[2]
auto_background_group.attrs['x_start'] = self.pattern_model.pattern.auto_background_subtraction_roi[0]
auto_background_group.attrs['x_end'] = self.pattern_model.pattern.auto_background_subtraction_roi[1]
else:
pattern_group.attrs['auto_background_subtraction'] = False
def load_from_hdf5(self, hdf5_group):
"""
Loads a configuration from the specified hdf5_group.
:type hdf5_group: h5py.Group
"""
f = hdf5_group
# disable all automatic functions
self.auto_integrate_pattern = False
self.auto_integrate_cake = False
self.auto_save_integrated_pattern = False
# get working directories
working_directories = {}
for key, value in f.get('working_directories').attrs.items():
if os.path.isdir(value):
working_directories[key] = value
else:
working_directories[key] = ''
self.working_directories = working_directories
# load pyFAI parameters
pyfai_parameters = {}
for key, value in f.get('calibration_model').get('pyfai_parameters').attrs.items():
pyfai_parameters[key] = value
try:
self.calibration_model.set_pyFAI(pyfai_parameters)
filename = f.get('calibration_model').attrs['calibration_filename']
(file_path, base_name) = os.path.split(filename)
self.calibration_model.filename = filename
self.calibration_model.calibration_name = base_name
except (KeyError, ValueError):
print('Problem with saved pyFAI calibration parameters')
pass
try:
self.correct_solid_angle = f.get('calibration_model').attrs['correct_solid_angle']
except KeyError:
pass
try:
distortion_spline_filename = f.get('calibration_model').attrs['distortion_spline_filename']
self.calibration_model.load_distortion(distortion_spline_filename)
except KeyError:
pass
# load detector definition
try:
detector_mode = f.get('detector').attrs['detector_mode']
if detector_mode == DetectorModes.PREDEFINED.value:
detector_name = f.get('detector').attrs['detector_name']
self.calibration_model.load_detector(detector_name)
elif detector_mode == DetectorModes.NEXUS.value:
nexus_filename = f.get('detector').attrs['nexus_filename']
self.calibration_model.load_detector_from_file(nexus_filename)
except AttributeError: # to ensure backwards compatibility
pass
# load img_model
self.img_model._img_data = np.copy(f.get('image_model').get('raw_image_data')[...])
filename = f.get('image_model').attrs['filename']
self.img_model.filename = filename
try:
self.img_model.file_name_iterator.update_filename(filename)
self.img_model._directory_watcher.path = os.path.dirname(filename)
except EnvironmentError:
pass
self.img_model.autoprocess = f.get('image_model').attrs['auto_process']
self.img_model.autoprocess_changed.emit()
self.img_model.factor = f.get('image_model').attrs['factor']
try:
self.img_model.series_max = f.get('image_model').attrs['series_max']
self.img_model.series_pos = f.get('image_model').attrs['series_pos']
except KeyError:
pass
if f.get('image_model').attrs['has_background']:
self.img_model.background_data = np.copy(f.get('image_model').get('background_data')[...])
self.img_model.background_filename = f.get('image_model').attrs['background_filename']
self.img_model.background_scaling = f.get('image_model').attrs['background_scaling']
self.img_model.background_offset = f.get('image_model').attrs['background_offset']
# load image transformations
transformation_group = f.get('image_model').get('image_transformations')
transformation_list = []
for key, transformation in transformation_group.attrs.items():
transformation_list.append(transformation)
self.calibration_model.load_transformations_string_list(transformation_list)
self.img_model.load_transformations_string_list(transformation_list)
# load roi data
if f.get('image_model').attrs['has_roi']:
self.roi = tuple(f.get('image_model').get('roi')[...])
# load mask model
self.mask_model.set_mask(np.copy(f.get('mask').get('data')[...]))
# load pattern model
if f.get('pattern').get('x') and f.get('pattern').get('y'):
self.pattern_model.set_pattern(f.get('pattern').get('x')[...],
f.get('pattern').get('y')[...],
f.get('pattern').attrs['pattern_filename'],
f.get('pattern').attrs['unit'])
self.pattern_model.file_iteration_mode = f.get('pattern').attrs['file_iteration_mode']
self.integration_unit = f.get('general_information').attrs['integration_unit']
if f.get('background_pattern').attrs['has_background_pattern']:
self.pattern_model.background_pattern = Pattern(f.get('background_pattern').get('x')[...],
f.get('background_pattern').get('y')[...],
'background_pattern')
if f.get('pattern').attrs['auto_background_subtraction']:
bg_params = []
bg_roi = []
bg_params.append(f.get('pattern').get('auto_background_settings').attrs['smoothing'])
bg_params.append(f.get('pattern').get('auto_background_settings').attrs['iterations'])
bg_params.append(f.get('pattern').get('auto_background_settings').attrs['poly_order'])
bg_roi.append(f.get('pattern').get('auto_background_settings').attrs['x_start'])
bg_roi.append(f.get('pattern').get('auto_background_settings').attrs['x_end'])
self.pattern_model.pattern.set_auto_background_subtraction(bg_params, bg_roi,
recalc_pattern=False)
# load general configuration
if f.get('general_information').attrs['integration_num_points']:
self.integration_rad_points = f.get('general_information').attrs['integration_num_points']
# cake parameters:
self.auto_integrate_cake = f.get('general_information').attrs['auto_integrate_cake']
try:
self.cake_azimuth_points = f.get('general_information').attrs['cake_azimuth_points']
except KeyError as e:
pass
try:
if f.get('general_information').attrs['cake_azimuth_range'] == "None":
self.cake_azimuth_range = None
else:
self.cake_azimuth_range = f.get('general_information').attrs['cake_azimuth_range']
except KeyError as e:
pass
# mask parameters
self.use_mask = f.get('general_information').attrs['use_mask']
self.transparent_mask = f.get('general_information').attrs['transparent_mask']
# corrections
if f.get('image_model').get('corrections').attrs['has_corrections']:
for name, correction_group in f.get('image_model').get('corrections').items():
params = {}
for param, val in correction_group.attrs.items():
params[param] = val
if name == 'cbn':
tth_array = 180.0 / np.pi * self.calibration_model.pattern_geometry.ttha
azi_array = 180.0 / np.pi * self.calibration_model.pattern_geometry.chia
cbn_correction = CbnCorrection(tth_array=tth_array, azi_array=azi_array)
cbn_correction.set_params(params)
cbn_correction.update()
self.img_model.add_img_correction(cbn_correction, name)
elif name == 'oiadac':
tth_array = 180.0 / np.pi * self.calibration_model.pattern_geometry.ttha
azi_array = 180.0 / np.pi * self.calibration_model.pattern_geometry.chia
oiadac = ObliqueAngleDetectorAbsorptionCorrection(tth_array=tth_array, azi_array=azi_array)
oiadac.set_params(params)
oiadac.update()
self.img_model.add_img_correction(oiadac, name)
elif name == 'transfer':
params = {
'original_data': correction_group.get('original_data')[...],
'original_filename': correction_group.get('original_data').attrs['filename'],
'response_data': correction_group.get('response_data')[...],
'response_filename': correction_group.get('response_data').attrs['filename']
}
self.img_model.transfer_correction.set_params(params)
self.img_model.enable_transfer_function()
# autosave parameters
self.auto_save_integrated_pattern = f.get('general_information').attrs['auto_save_integrated_pattern']
self.integrated_patterns_file_formats = []
for file_format in f.get('general_information').get('integrated_patterns_file_formats'):
self.integrated_patterns_file_formats.append(file_format[0].decode('utf-8'))
if self.calibration_model.is_calibrated:
self.integrate_image_1d()
else:
self.pattern_model.pattern.recalculate_pattern()<|fim▁end|> | self._auto_integrate_pattern = True
self._auto_integrate_cake = False
|
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#
# Speedcurve.py documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 16 12:29:28 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx.environment
from docutils.utils import get_source_line
def _warn_node(self, msg, node):
if not msg.startswith('nonlocal image URI found:'):
self._warnfunc(msg, '%s:%s' % get_source_line(node))
sphinx.environment.BuildEnvironment.warn_node = _warn_node
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Speedcurve.py'
copyright = u'2015, Matt Chung'
author = u'Matt Chung'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for<|fim▁hole|>
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Speedcurvepydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Speedcurvepy.tex', u'Speedcurve.py Documentation',
u'Matt Chung', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'speedcurvepy', u'Speedcurve.py Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Speedcurvepy', u'Speedcurve.py Documentation',
author, 'Speedcurvepy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autodoc_mock_imports = ['requests']<|fim▁end|> | # a list of builtin themes.
html_theme = 'sphinx_rtd_theme' |
<|file_name|>decontaminate_unitary.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# File created on 09 Aug 2012
from __future__ import division
__author__ = "Jon Sanders"
__copyright__ = "Copyright 2014, Jon Sanders"
__credits__ = ["Jon Sanders"]
__license__ = "GPL"
__version__ = "1.9.1"
__maintainer__ = "Jon Sanders"
__email__ = "[email protected]"
__status__ = "Development"
from qiime.util import load_qiime_config, parse_command_line_parameters,\
get_options_lookup, make_option
from qiime.parse import parse_qiime_parameters, parse_taxonomy, parse_mapping_file_to_dict
from qiime.filter import sample_ids_from_metadata_description
from bfillings.uclust import get_clusters_from_fasta_filepath
from bfillings.usearch import usearch_qf
from scipy.stats import spearmanr
import os.path
from biom import load_table
import numpy as np
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = """
A script to filter sequences by potential contaminants"""
script_info['script_description'] = """
This script performs a series of filtering steps on a sequence file with the
intent of removing contaminant sequences. It requires input of an OTU table, a
sample map, an OTU map, a sequence FASTA file, and an output directory.
There are two primary approaches the script can take: (1) comparing sequence
abundances in blank control sequence libraries to those in sample libraries,
where sequences present in blanks are presumed to be contaminants, and (2)
comparing sequences in sample libraries to a database of known contaminants.
In approach (1), OTUs (or unique sequences, if OTU table and map are defined at
100% identity) are tested for their maximum and mean presence in blank and
sample libraries, and excluded if they satisfy the given criteria. For example,
if you want to exclude any sequences whose maximum abundance in a blank sample
is more than 10% the maximum abundance in a sample (maxB > 0.1 * maxS), you
would choose '--removal_stat_blank maxB --removal_stat_sample maxS
--removal_differential 0.1'. For this approach, you must also provide a column
in your mapping file that indicates which samples to use as blanks, and pass
this information to the script with the 'valid states' option (e.g.
'Blank:True')
In approach (2), you must provide a fasta library of putative contaminants.
These may be previously clustered OTUs from the blank samples, commonly
sequenced contaminants (if known), or another fasta file. Sequences will be
clustered against this fasta file using Uclust-Ref, and any that match within
a given percent similarity (using the '-c' or '--contaminant_similarity' option)
will be marked as putative contaminants.
When using approach (2), it is possible to remove 'real' sequences from samples
that just happen to be similar to contaminants. This may be detectable when
using unique sequence OTU tables/maps as input, if the 'real' sequences are
nonetheless slightly different from contaminants. In this case, it may be
desirable to reinstate those unique sequences that are present in samples but
not in blanks. You may do this using criteria of relative abundance (similar to
approach [1], where a sequence is reinstated if its max presence in a sample is
greater than its max presence in a blank, i.e. maxS > X * maxB) or of incidence
in non-blank samples (i.e. reinstated if present in two or more samples). If
both criteria are provided, you must choose to reinstate either the intersection
of the criteria (i.e. BOTH more abundant in samples AND present in 2 or more)
or the union (i.e. EITHER more abundant in samples OR present in 2 or more).
"""
script_info['script_usage'] = []
script_info['script_usage'].append(("""Example:""", """
The following steps are performed by the command below:
1. Calculate max relative abundance of each sequence in samples and blanks
2. Identify sequences whose maximum abundance in blanks is more than 10% their
maximum abundance in samples.
3. Output OTU maps of sequences for which above is true, and for which above is
false.
""", """
decontaminate.py -i unique_seqs_otu_table.biom -o filter_out_dir
-m metadata_mapping_file.txt -f unique_seqs_rep_set.fna
-M unique_seqs_otus.txt -s 'Blank:True' --removal_stat_blank maxB
--removal_stat_sample maxS --removal_differential 0.1
"""))
script_info['output_description'] = """
This script will output a tab-delimited summary table, indicating the relative
abundance stats for each sequence considered, along with its fate at each step
of the process.
It will also output an OTU map for each category of sequences identified (e.g.
those never identified as contaminants, those identified as reference-based
contaminants, those identified as abundance-based contaminants, and those
reinstated). These OTU maps can then be used to filter in the input FASTA file.
Output file naming:
contamination_summary.txt -- tab-delimited per-sequence summary file
passed_otu_map.txt -- OTU map of non-contaminant sequences
ref_contaminants_otu_map.txt -- OTU map of reference contaminant sequences
abund_contaminants_otu_map.txt -- OTU map of abundance contaminant sequences
reinstated_contaminants_otu_map.txt -- OTU map of reinstated sequences
"""
script_info['required_options'] = [
options_lookup["output_dir"]
]
script_info['optional_options'] = [
options_lookup["otu_table_as_primary_input"],
make_option('--mothur_counts_fp',
type='existing_filepath',
help='path to mothur counts table as input'),
options_lookup["mapping_fp"],
make_option('-M', '--otu_map_fp', type="existing_filepath",
help='the input OTU map file'),
make_option('-s',
'--valid_states', type='string',
help="Column header:value pair in mapping file identifying blank samples"),
make_option('--blank_id_fp',
type='existing_filepath',
help='path to file listing blank sample ids'),
options_lookup["input_fasta"],
make_option('--contaminant_db_fp', type="existing_filepath",
help='A FASTA file of potential contaminant sequences'),
make_option('-c', '--contaminant_similarity', type='float', default=0.97,
help=('Sequence similarity threshold for contaminant matches')),
make_option('-r', '--max_correlation', type='float',
help=('Maximum Spearman correlation for contaminant identification')),
make_option('--correlate_header', type='string',
help=('Column header in mapping file with correlation data')),
make_option('--min_relabund_threshold', type="float",
help='discard sequences below this relative abundance threshold'),
make_option('--prescreen_threshold', type="float",
help='prescreen libraries that lose more than this proportion of sequences'),
make_option('--removal_stat_blank', type="choice", choices=["maxB", "avgB"],
help='blank statistic to be used for removal (maxB, avgB)'),
make_option('--removal_stat_sample', type="choice", choices=["maxS", "avgS"],
help='sample statistic to be used for removal (maxS, avgS)'),
make_option('--removal_differential', type="float",
help='differential proportion for removal (maxB > X * maxS)'),
make_option('--reinstatement_stat_blank', type="choice", choices=["maxB", "avgB"],
help='blank statistic to be used for reinstatement (maxB, avgB)'),
make_option('--reinstatement_stat_sample', type="choice", choices=["maxS", "avgS"],
help='sample statistic to be used for reinstatement (maxS, avgS)'),
make_option('--reinstatement_differential', type="float",
help='differential proportion for reinstatement (maxS > X * maxB)'),
make_option('--reinstatement_sample_number', type="int",
help='minimum number of samples necessary for reinstatement'),
make_option('--reinstatement_method', type="choice", choices=["union", "intersection"],
help='method to rectify reinstatement criteria'),
make_option('--drop_lib_threshold', type="float",
help='read loss threshold to drop libraries from output table'),
make_option('--write_filtered_output', action="store_true",
help='write an output table filtered of contaminants'),
make_option('--write_per_library_stats', action="store_true",
help='write a per-library decontamination summary'),
make_option('--write_per_seq_stats', action="store_true",
help='write a per-sequence decontamination summary'),
make_option('--write_per_seq_disposition', action="store_true",
help='write a per-sequence disposition file'),
make_option('--write_output_seq_lists', action="store_true",
help='write separate sequence name lists for each contaminant category')
]
script_info['version'] = __version__
def pick_ref_contaminants(queries, ref_db_fp, input_fasta_fp, contaminant_similarity, output_dir):
# Blast against contaminant DB
clusters, failures, seeds = get_clusters_from_fasta_filepath(
input_fasta_fp,
input_fasta_fp,
percent_ID=contaminant_similarity,
max_accepts=1,
max_rejects=8,
stepwords=8,
word_length=8,
optimal=False,
exact=False,
suppress_sort=False,
output_dir=output_dir,
enable_rev_strand_matching=False,
subject_fasta_filepath=ref_db_fp,
suppress_new_clusters=True,
return_cluster_maps=True,
stable_sort=False,
save_uc_files=True,
HALT_EXEC=False)
# Pick seqs that fail the similarity to contaminants rule
ref_contaminants = set(queries) - set(failures)
return(ref_contaminants)
def pick_corr_contaminants(sample_biom,
corr_data_dict,
max_r):
# Filter biom to only samples for which correlate data available
sample_biom_filt = sample_biom.filter(
lambda val, id_, metadata: id_ in corr_data_dict,
invert=False,
inplace=False)
otus = sample_biom_filt.ids(axis='observation')
samples = sample_biom_filt.ids(axis='sample')
# Make array of correlate data in same order as biom file
correlate = [corr_data_dict[x] for x in samples]
obs_corr_dict = {}
# Make a 2D array of normalized biom table values
norm_array = sample_biom_filt.norm(inplace=False).matrix_data.toarray()
t = 0
for otu in otus:
obs_corr_dict[otu] = spearmanr(norm_array[t], correlate)
t += 1
# get keys (otu names) for OTUs with less than minimum correlation
obs_corr_contaminants = [x for x in obs_corr_dict if obs_corr_dict[x][0] < max_r]
return(set(obs_corr_contaminants), obs_corr_dict)
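# Illustrative sketch (hypothetical data): a toy run of pick_corr_contaminants()
# showing how OTUs whose relative abundance has a Spearman correlation below
# max_r against a metadata column get flagged. The OTU/sample names and counts
# are made up, and biom.table.Table is assumed importable from the same
# biom-format package used above.
def _example_pick_corr_contaminants():
    from biom.table import Table
    data = np.array([[1, 2, 3, 4],   # otu_up rises with the correlate
                     [4, 3, 2, 1]])  # otu_down falls with the correlate
    toy_biom = Table(data, ['otu_up', 'otu_down'], ['s1', 's2', 's3', 's4'])
    corr_data = {'s1': 0.1, 's2': 0.2, 's3': 0.3, 's4': 0.4}
    contaminants, corr_dict = pick_corr_contaminants(toy_biom, corr_data, max_r=0)
    # expected: only 'otu_down' (negative correlation) is flagged
    return contaminants, corr_dict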
def reinstate_abund_seqs(putative_contaminants,
contamination_stats_dict,
contamination_stats_header,
reinstatement_stat_sample,
reinstatement_stat_blank,
reinstatement_differential):
abund_reinstated_seqs = compare_blank_abundances(contamination_stats_dict,
contamination_stats_header,
reinstatement_stat_sample,
reinstatement_stat_blank,
reinstatement_differential,
negate=False)
# Only consider seqs as reinstated if previously identified as contaminants
abund_reinstated_seqs = set(putative_contaminants) & set(abund_reinstated_seqs)
return(abund_reinstated_seqs)
def reinstate_incidence_seqs(putative_contaminants,
unique_seq_biom,
blank_sample_ids,
reinstatement_sample_number):
sample_biom = unique_seq_biom.filter(lambda val, id_, metadata:
id_ in blank_sample_ids, invert=True, inplace=False)
incidence_reinstated_seqs = sample_biom.pa().filter(
lambda val, id_, metadata: val.sum() >= reinstatement_sample_number,
axis='observation', inplace=False).ids(
axis='observation')
# Only consider seqs as reinstated if previously identified as contaminants
incidence_reinstated_seqs = set(putative_contaminants) & set(incidence_reinstated_seqs)
return(incidence_reinstated_seqs)
def mothur_counts_to_biom(mothur_fp):
mothur_biom = load_table(mothur_fp)
mothur_biom.type = u'OTU table'
filter_biom = mothur_biom.filter(
lambda val, id_, metadata: id_ == 'total', invert=True)
return(filter_biom)
def biom_to_mothur_counts(biom_obj):
sample_ids = biom_obj.ids(axis='sample')
otu_ids = biom_obj.ids(axis='observation')
otu_totals = biom_obj.sum(axis='observation')
outstring = 'Representative_Sequence\ttotal\t' + '\t'.join(sample_ids) + '\n'
for otu in otu_ids:
otu_data = biom_obj.data(id = otu, axis = 'observation')
outstring += '{0}\t{1}\t{2}\n'.format(otu,
int(otu_data.sum()),
'\t'.join(str(x) for x in otu_data.astype('int')))
return(outstring)
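# Illustrative sketch (hypothetical data): what biom_to_mothur_counts() emits
# for a toy table -- a tab-delimited mothur count table with a 'total' column.
# Names and counts are made up; biom.table.Table is assumed importable.
def _example_biom_to_mothur_counts():
    from biom.table import Table
    toy_biom = Table(np.array([[3, 1], [0, 2]]),
                     ['otu_1', 'otu_2'], ['sampleA', 'sampleB'])
    counts_str = biom_to_mothur_counts(toy_biom)
    # counts_str is roughly:
    #   Representative_Sequence <tab> total <tab> sampleA <tab> sampleB
    #   otu_1 <tab> 4 <tab> 3 <tab> 1
    #   otu_2 <tab> 2 <tab> 0 <tab> 2
    return counts_str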
def prescreen_libraries(unique_seq_biom,
blank_sample_ids,
removal_stat_sample,
removal_stat_blank,
removal_differential,
prescreen_threshold):
contamination_stats_header, contamination_stats_dict = \
get_contamination_stats(unique_seq_biom, blank_sample_ids)
abund_contaminants = compare_blank_abundances(contamination_stats_dict,
contamination_stats_header,
removal_stat_sample,
removal_stat_blank,
removal_differential,
negate=True)
# make relabund table
norm_biom = unique_seq_biom.norm(inplace = False)
# filter out sequences marked as contaminants
norm_biom.filter(lambda val, id_, metadata: id_ in abund_contaminants,
axis='observation', invert=True, inplace=True)
# filter out samples above threshold
norm_biom.filter(lambda val, id_, metadata: sum(val) > prescreen_threshold,
axis='sample', invert=False, inplace=True)
# Now only have samples failing the prescreening
above_threshold_samples = norm_biom.ids(axis='sample')
return above_threshold_samples
def get_contamination_stats(biom_file, blank_sample_ids=None, exp_sample_ids=[], proportional=False):
if not proportional:
biom_file = biom_file.norm(inplace=False)
header = ['maxS','avgS']
# Calculate blank stats if blank sample names are provided
if blank_sample_ids:
blanks = True
blank_data = biom_file.filter(blank_sample_ids, axis='sample',
invert=False, inplace=False).matrix_data
maxB = blank_data.max(axis=1).todense().tolist()
avgB = blank_data.mean(axis=1).tolist()
header.append('maxB')
header.append('avgB')
else:
# Otherwise, set the 'blanks' to an empty list
blank_sample_ids = []
blanks = False
# If specific list of experimental sample IDs aren't provided,
# assume everything not marked blank is an experimental sample
if len(exp_sample_ids) == 0:
exp_sample_ids = set(biom_file.ids(axis='sample')) - set(blank_sample_ids)
sample_data = biom_file.filter(exp_sample_ids, axis='sample',
invert=False, inplace=False).matrix_data
maxS = sample_data.max(axis=1).todense().tolist()
avgS = sample_data.mean(axis=1).tolist()
stats_dict = {}
i = 0
if blanks:
for otu in biom_file.ids(axis='observation'):
stats_dict[otu] = [maxS[i][0], avgS[i][0], maxB[i][0], avgB[i][0]]
i += 1
else:
for otu in biom_file.ids(axis='observation'):
stats_dict[otu] = [maxS[i][0], avgS[i][0]]
i += 1
return(header, stats_dict)
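# Illustrative sketch (hypothetical data): get_contamination_stats() on a tiny
# in-memory table with one blank library. The per-OTU stats are computed on
# sample-normalized (relative abundance) values. Names and counts are made up;
# biom.table.Table is assumed importable.
def _example_get_contamination_stats():
    from biom.table import Table
    data = np.array([[10, 0, 1],   # otu_1 counts in s1, s2, blank1
                     [0, 5, 9]])   # otu_2 counts in s1, s2, blank1
    toy_biom = Table(data, ['otu_1', 'otu_2'], ['s1', 's2', 'blank1'])
    header, stats = get_contamination_stats(toy_biom, blank_sample_ids=['blank1'])
    # header == ['maxS', 'avgS', 'maxB', 'avgB']; stats maps each OTU id to its
    # max/mean relative abundance in experimental samples and in blanks.
    return header, stats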
def pick_min_relabund_threshold(stats_dict, stats_header, min_relabund, sample_stat='maxS'):
i_s = stats_header.index(sample_stat)
passed_otus = set()
for otu in stats_dict:
if(float(stats_dict[otu][i_s]) < float(min_relabund)):
passed_otus.add(otu)
return(passed_otus)
def compare_blank_abundances(stats_dict, stats_header,
sample_stat, blank_stat, scalar=1, negate=False):
"""Note that this method will default to returning sequences for which
the criteria sample_stat > blank_stat * scalar are TRUE, i.e. non-contam
sequences. To return contaminants (sequences that FAIL the inequality),
set negate to True."""
i_s = stats_header.index(sample_stat)
i_b = stats_header.index(blank_stat)
passed_otus = set()
for otu in stats_dict:
if((float(stats_dict[otu][i_s]) > (float(scalar) * float(stats_dict[otu][i_b]))) != negate):
passed_otus.add(otu)
# print passed_otus
return(passed_otus)
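# Illustrative sketch (hypothetical data): how compare_blank_abundances()
# splits sequences with the rule sample_stat > scalar * blank_stat. The stats
# dict mimics the output of get_contamination_stats(); all values are made up.
def _example_compare_blank_abundances():
    example_header = ['maxS', 'avgS', 'maxB', 'avgB']
    example_stats = {'otu_1': [0.50, 0.20, 0.01, 0.005],  # sample-dominant
                     'otu_2': [0.02, 0.01, 0.30, 0.150]}  # blank-dominant
    non_contam = compare_blank_abundances(example_stats, example_header,
                                          'maxS', 'maxB', scalar=0.1,
                                          negate=False)
    contam = compare_blank_abundances(example_stats, example_header,
                                      'maxS', 'maxB', scalar=0.1,
                                      negate=True)
    # expected: non_contam == {'otu_1'} and contam == {'otu_2'}
    return non_contam, contam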
def calc_per_category_decontam_stats(biom_obj, filter_otus):
reads = biom_obj.filter(lambda val, id_, metadata: id_ in filter_otus,
axis='observation', invert=False, inplace=False).sum(axis = 'sample')
otus = biom_obj.pa(inplace = False).filter(lambda val, id_, metadata: id_ in filter_otus,
axis='observation', invert=False, inplace=False).sum(axis = 'sample')
return(reads.tolist(),otus.tolist())
def calc_per_library_decontam_stats(start_biom, output_dict):
# calculate starting number of sequences and unique sequences per library
steps = ['below_relabund_threshold','putative_contaminants','ever_good_seqs','reinstated_seqs','all_good_seqs']
results_dict = {}
results_dict['starting'] = calc_per_category_decontam_stats(start_biom, start_biom.ids(axis='observation'))
results_header = ['starting']
for step in steps:
if step in output_dict:
results_dict[step] = calc_per_category_decontam_stats(start_biom, output_dict[step])
results_header.append(step)
return(results_dict, results_header)
def filter_contaminated_libraries(unique_seq_biom, contaminant_otus, contam_threshold):
# make relabund table
norm_biom = unique_seq_biom.norm(inplace = False)
# filter out sequences marked as contaminants
norm_biom.filter(lambda val, id_, metadata: id_ in contaminant_otus,
axis='observation', invert=True, inplace=True)
# filter out samples above threshold
norm_biom.filter(lambda val, id_, metadata: sum(val) > contam_threshold,
axis='sample', invert=False, inplace=True)
# filter contam sequences from original biom
filtered_biom = unique_seq_biom.filter(lambda val, id_, metadata: id_ in contaminant_otus,
axis='observation', invert=True, inplace=False)
# filter samples that lost too much relative to starting from original biom
filtered_biom = filtered_biom.filter(lambda val, id_, metadata: id_ in norm_biom.ids(axis='sample'),
axis='sample', invert=False, inplace=True)
return(filtered_biom)
def print_filtered_otu_map(input_otu_map_fp, output_otu_map_fp, filter_set):
output_otu_map_f = open(output_otu_map_fp, 'w')
for line in open(input_otu_map_fp, 'U'):
seq_identifier = line.strip().split('\t')[0]
# write OTU line if present in the filter set
if seq_identifier in filter_set:
output_otu_map_f.write(line)
output_otu_map_f.close()
return
def print_filtered_mothur_counts(mothur_counts_fp, output_counts_fp, filter_set):
output_counts_f = open(output_counts_fp, 'w')
t = 0
for line in open(mothur_counts_fp, 'U'):
seq_identifier = line.strip().split('\t')[0]
# only write this line if the sequence identifier is in the filter set,
# or if it's the header (first) line
if seq_identifier in filter_set or t == 0:
output_counts_f.write(line)
t += 1
output_counts_f.close()
return
def print_per_library_stats(per_library_stats, per_library_stats_header, lib_ids, dropped_libs=[]):
outline = 'Library\t'
outline += '_reads\t'.join(per_library_stats_header) + '_reads\t'
outline += '_otus\t'.join(per_library_stats_header) + '_otus'
if len(dropped_libs) > 0:
outline += '\tlibrary_discarded'
discard = True
else:
discard = False
outline += '\n'
t = 0
for lib in lib_ids:
outline += lib
for category in per_library_stats_header:
outline += '\t' + str(int(per_library_stats[category][0][t]))
for category in per_library_stats_header:
outline += '\t' + str(int(per_library_stats[category][1][t]))
if discard:
if lib in dropped_libs:
outline += '\tTrue'
else:
outline += '\tFalse'
outline += '\n'
t += 1
return(outline)
def print_otu_disposition(input_seqs, output_dict, hierarchy=[]):
outline = ''
if hierarchy == []:
hierarchy = ['below_relabund_threshold', 'putative_contaminants','reinstated_seqs','ever_good_seqs']
# Subset hierarchy to levels also in output dict:
hierarchy = [x for x in hierarchy if x in output_dict]
# Check that the levels of the hierarchy are non-overlapping:
for x in range(len(hierarchy) - 1):
for y in range(x + 1,len(hierarchy)):
if not output_dict[hierarchy[x]].isdisjoint(output_dict[hierarchy[y]]):
print('warning: non-disjoint sets in the disposition hierarchy')
seqs_left = set(input_seqs)
for seq in input_seqs:
for level in hierarchy:
if seq in output_dict[level]:
outline += '{0}\t{1}\n'.format(seq,level)
break
return(outline)
def print_filtered_seq_headers(seq_headers, output_headers_fp, filter_set):
output_headers_f = open(output_headers_fp, 'w')
for x in seq_headers:
if x in filter_set:
output_headers_f.write('{0}\n'.format(x))
output_headers_f.close()
return
def print_filtered_output(output_method, unfiltered_input, output_dir, output_dict, output_categories=None):
output_fn = 'print_filtered_' + output_method
if not output_categories:
output_categories = output_dict.keys()
if output_method == 'seq_headers':
output_fn = print_filtered_seq_headers
elif output_method == 'mothur_counts':
output_fn = print_filtered_mothur_counts
elif output_method == 'otu_map':
output_fn = print_filtered_otu_map
for category in output_categories:
output_fn(unfiltered_input,
os.path.join(output_dir,
'{0}_{1}.txt'.format(category, output_method)),
output_dict[category])
return
def print_results_file(seq_ids,
output_dict,
output_fp,
stats_header=None,
stats_dict=None,
corr_data_dict=None):
output_f = open(output_fp, 'w')
header = "SeqID"
sorted_categories = sorted(output_dict.keys())
for category in sorted_categories:
header += '\t{0}'.format(category)
if stats_header:
for x in stats_header:
header += '\t{0}'.format(x)
if corr_data_dict:
header += '\t{0}\t{1}'.format('spearman_r','spearman_p')
output_f.write(header + '\n')
for otu in seq_ids:
outline = str(otu)
for category in sorted_categories:
outline += '\t{0}'.format(1 if otu in output_dict[category] else 0)
if stats_header:
t = 0
for x in stats_header:
outline += '\t{0:.3f}'.format(stats_dict[otu][t])
t += 1
if corr_data_dict:
outline += '\t{0:.3f}\t{1:.3f}'.format(
corr_data_dict[otu][0],
corr_data_dict[otu][1])
output_f.write(outline + '\n')
return
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
otu_table_fp = opts.otu_table_fp
mothur_counts_fp = opts.mothur_counts_fp
mapping_fp = opts.mapping_fp
valid_states = opts.valid_states
blank_id_fp = opts.blank_id_fp
contaminant_db_fp = opts.contaminant_db_fp
contaminant_similarity = opts.contaminant_similarity
max_correlation = opts.max_correlation
correlate_header = opts.correlate_header
input_fasta_fp = opts.input_fasta_fp
otu_map_fp = opts.otu_map_fp
output_dir = opts.output_dir
min_relabund_threshold = opts.min_relabund_threshold
prescreen_threshold = opts.prescreen_threshold
removal_stat_blank = opts.removal_stat_blank
removal_stat_sample = opts.removal_stat_sample
removal_differential = opts.removal_differential
reinstatement_stat_sample = opts.reinstatement_stat_sample
reinstatement_stat_blank = opts.reinstatement_stat_blank
reinstatement_differential = opts.reinstatement_differential
reinstatement_sample_number = opts.reinstatement_sample_number
reinstatement_method = opts.reinstatement_method
write_output_seq_lists = opts.write_output_seq_lists
write_filtered_output = opts.write_filtered_output
drop_lib_threshold = opts.drop_lib_threshold
write_per_seq_stats = opts.write_per_seq_stats
write_per_library_stats = opts.write_per_library_stats
write_per_seq_disposition = opts.write_per_seq_disposition
# Make unique seq OTU table (biom file)
# Compute unique seq stats
# output biom file with unique seq stats
# Optionally: make candidate contaminant DB
# remove sequences present at higher abundance in samples
# cluster blanks
# remove low-abundance contaminant OTUs
# Filter by similarity against candidate contaminant DB
# annotate unique seq OTU table with top hit (OTU#, rep seq, ID%)
# make list of seqs @ threshold
# Calculate reinstatement rule for filtered sequences
# Generate lists of seqs failing:
# - unique seq rule
# - hit to contaminant
# - reinstatement after hit
# Make sure exactly one of an OTU table biom file or mothur counts table was passed
input_file_counter = 0
if mothur_counts_fp:
input_file_counter += 1
unique_seq_biom = mothur_counts_to_biom(mothur_counts_fp)
mothur_output = True
print "mothur input"
if otu_table_fp:
input_file_counter += 1
unique_seq_biom = load_table(otu_table_fp)
mothur_output = False
print "BIOM input"
if input_file_counter != 1:
option_parser.error("must provide ONLY ONE of an OTU table biom file or"
"mothur counts table")
# Check to make sure that if blank-based contamination filtering requested,
# all necessary options are specified:
removal_options_counter = 0
if removal_stat_blank:
removal_options_counter += 1
if removal_stat_sample:
removal_options_counter += 1
if removal_differential:
removal_options_counter += 1
if ((removal_options_counter > 0) and (removal_options_counter < 3)):
option_parser.error("Must provide all of "
"removal_stats_blank, "
"removal_stat_sample, and "
"removal_differential, or none.")
elif removal_options_counter == 0:
blank_stats_removal = False
elif removal_options_counter == 3:
blank_stats_removal = True
# If reference-based filtering requested, make sure all necessary options
# have been specified:
if contaminant_db_fp and not input_fasta_fp:
option_parser.error("If specifying ref-based contaminant ID, must "
"also specify path to input sequence fasta")
# If correlation-based filtering requested, make sure correlate data
# are specified
if max_correlation and not correlate_header:
option_parser.error("If specifying maximum Spearman correlation, must "
"also provide map column header for correlate data")
# If sequence reinstatement is requested, make sure all necessary options
# are specified
reinstatement_options_counter = 0
if reinstatement_stat_blank:
reinstatement_options_counter += 1
if reinstatement_stat_sample:
reinstatement_options_counter += 1
if reinstatement_differential:
reinstatement_options_counter += 1
if ((reinstatement_options_counter > 0) and
(reinstatement_options_counter < 3)):
option_parser.error("Must provide all of "
"reinstatement_stats_blank, "
"reinstatement_stat_sample, and "
"reinstatement_differential, or none.")
if ((reinstatement_options_counter == 3 and reinstatement_sample_number)
and not reinstatement_method):
option_parser.error("If providing sample number AND abundance criteria "
"for sequence reinstatement, must also provide "
"a method for combining results.")
if reinstatement_options_counter == 3 or reinstatement_sample_number:
reinstatement = True
else:
reinstatement = False
# get blank sample IDs from mapping file or sample ID list
if mapping_fp and valid_states:
blank_sample_ids = sample_ids_from_metadata_description(
open(mapping_fp, 'U'), valid_states)
blanks = True
elif blank_id_fp is not None:
blank_id_f = open(blank_id_fp, 'Ur')
blank_sample_ids = set([line.strip().split()[0]
for line in blank_id_f
if not line.startswith('#')])
blank_id_f.close()
blanks = True
else:
blanks = False
# Initialize output objects
output_dict = {}
contaminant_types = []
contamination_stats_dict = None
contamination_stats_header = None
corr_data_dict = None
# Do blank-based stats calculations; if no blanks are present, make sure no
# blank-dependent methods were requested:
if blanks:
if prescreen_threshold:
low_contam_libraries = prescreen_libraries(unique_seq_biom,
blank_sample_ids,
removal_stat_sample,
removal_stat_blank,
removal_differential,
prescreen_threshold)
contamination_stats_header, contamination_stats_dict = \
get_contamination_stats(unique_seq_biom,
blank_sample_ids,
exp_sample_ids=low_contam_libraries)
else:
contamination_stats_header, contamination_stats_dict = \
get_contamination_stats(unique_seq_biom, blank_sample_ids)
elif (blank_stats_removal or reinstatement or prescreen_threshold):
option_parser.error("Blank-based filtering requested but no blank"
"samples indicated in mapping file or ID file.")
else:
contamination_stats_header, contamination_stats_dict = \
get_contamination_stats(unique_seq_biom)
seq_ids = unique_seq_biom.ids(axis='observation')
# Do blank-based contaminant identification
if min_relabund_threshold:
output_dict['below_relabund_threshold'] = pick_min_relabund_threshold(
contamination_stats_dict,
contamination_stats_header,
min_relabund_threshold)
if blank_stats_removal:
output_dict['abund_contaminants'] = compare_blank_abundances(contamination_stats_dict,
contamination_stats_header,
removal_stat_sample,
removal_stat_blank,
removal_differential,
negate=True)
contaminant_types.append('abund_contaminants')
# Do reference-based contaminant identification
if contaminant_db_fp:
output_dict['ref_contaminants'] = pick_ref_contaminants(seq_ids, contaminant_db_fp, input_fasta_fp, contaminant_similarity, output_dir)
contaminant_types.append('ref_contaminants')
# Do spearman correlation based contaminant identification
if max_correlation:
metadata_dict = parse_mapping_file_to_dict(open(mapping_fp, 'U'))[0]
corr_data_dict = {x: float(metadata_dict[x][correlate_header]) for x in metadata_dict}
output_dict['corr_contaminants'], corr_contaminant_dict = pick_corr_contaminants(unique_seq_biom,
corr_data_dict,
max_correlation)
contaminant_types.append('corr_contaminants')
else:
corr_contaminant_dict = None
# Putative contaminants are those that have been identified by any method
output_dict['putative_contaminants'] = set.union(*map(set, [output_dict[x] for x in contaminant_types]))
# If considering low abundance sequences, remove those from consideration as potential contaminants
if 'below_relabund_threshold' in output_dict:
output_dict['putative_contaminants'] = output_dict['putative_contaminants'] - set(output_dict['below_relabund_threshold'])
# Pick abundance-criterion seqs to reinstate
if (reinstatement_stat_blank and reinstatement_stat_sample and reinstatement_differential):
output_dict['abund_reinstated_seqs'] = reinstate_abund_seqs(output_dict['putative_contaminants'],
contamination_stats_dict,
contamination_stats_header,
reinstatement_stat_sample,
reinstatement_stat_blank,
reinstatement_differential)
output_dict['reinstated_seqs'] = output_dict['abund_reinstated_seqs']
# Pick incidence-criterion seqs to reinstate<|fim▁hole|> if reinstatement_sample_number:
output_dict['incidence_reinstated_seqs'] = reinstate_incidence_seqs(
output_dict['putative_contaminants'],
unique_seq_biom,
blank_sample_ids,
reinstatement_sample_number)
output_dict['reinstated_seqs'] = output_dict['incidence_reinstated_seqs']
# combine incidence and abundance reinstatements
if reinstatement_sample_number and reinstatement_stat_blank:
if reinstatement_method == "union":
output_dict['reinstated_seqs'] = output_dict['abund_reinstated_seqs'] | output_dict['incidence_reinstated_seqs']
elif reinstatement_method == "intersection":
output_dict['reinstated_seqs'] = output_dict['abund_reinstated_seqs'] & output_dict['incidence_reinstated_seqs']
# make sets for sequence _never_ identified as contaminants:
output_dict['ever_good_seqs'] = set(seq_ids) - output_dict['putative_contaminants']
# If considering low abundance sequences, remove those from consideration as potential contaminants
if 'below_relabund_threshold' in output_dict:
output_dict['ever_good_seqs'] = output_dict['ever_good_seqs'] - set(output_dict['below_relabund_threshold'])
# Make set of good seqs for final filtering
final_good_seqs = output_dict['ever_good_seqs']
# ...and those either never ID'd as contaminants or reinstated:
if reinstatement:
output_dict['all_good_seqs'] = set(output_dict['ever_good_seqs'] | output_dict['reinstated_seqs'])
final_good_seqs = output_dict['all_good_seqs']
# ...and those who remain contaminants after reinstatement:
output_dict['never_good_seqs'] = set(output_dict['putative_contaminants'] - output_dict['reinstated_seqs'])
# print filtered OTU maps if given a QIIME OTU map input
if otu_map_fp:
print_filtered_output('otu_map', otu_map_fp, output_dir, output_dict)
# print filtered Mothur counts tables if given a Mothur counts table input
if mothur_output:
print_filtered_output('mothur_counts', mothur_counts_fp, output_dir, output_dict)
# print filtered seq header files if requested
if write_output_seq_lists:
print_filtered_output('seq_headers', seq_ids, output_dir, output_dict)
# filter final biom file to just good seqs
filtered_biom = unique_seq_biom.filter(lambda val, id_, metadata: id_ in final_good_seqs,
axis='observation', invert=False, inplace=False)
# drop heavily contaminated libraries if requested
if drop_lib_threshold:
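# normalize, keep only the good seqs, then collect the IDs of libraries whose
# remaining (good) fraction falls below the threshold (invert=True keeps the failures)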
dropped_libs = unique_seq_biom.norm(inplace=False).filter(lambda val, id_, metadata: id_ in final_good_seqs,
axis='observation', invert=False, inplace=False).filter(lambda val, id_, metadata: sum(val) >= drop_lib_threshold,
axis='sample', invert=True, inplace=False).ids(axis='sample')
filtered_biom.filter(lambda val, id_, metadata: id_ in dropped_libs,
axis='sample', invert=True, inplace=True)
else:
dropped_libs = []
# print filtered biom/mothur_output if library filtering is requested
if write_filtered_output:
if mothur_output:
output_counts_string = biom_to_mothur_counts(filtered_biom)
with open(os.path.join(output_dir,'decontaminated_table.counts'), "w") as output_counts_file:
output_counts_file.write(output_counts_string)
else:
output_biom_string = filtered_biom.to_json('Filtered by decontaminate.py')
with open(os.path.join(output_dir,'decontaminated_otu_table.biom'), "w") as output_biom_file:
output_biom_file.write(output_biom_string)
# print per-library stats if requested
if write_per_library_stats:
per_library_stats, per_library_stats_header = calc_per_library_decontam_stats(unique_seq_biom, output_dict)
library_stats_string = print_per_library_stats(per_library_stats, per_library_stats_header, unique_seq_biom.ids(axis='sample'), dropped_libs=dropped_libs)
with open(os.path.join(output_dir,'decontamination_per_library_stats.txt'), "w") as output_stats_file:
output_stats_file.write(library_stats_string)
# print otu by disposition file if requested
if write_per_seq_disposition:
per_seq_disposition = print_otu_disposition(seq_ids, output_dict)
with open(os.path.join(output_dir,'decontamination_per_otu_disposition.txt'), "w") as output_stats_file:
output_stats_file.write(per_seq_disposition)
# print log file / per-seq info
if write_per_seq_stats:
print_results_file(seq_ids,
output_dict,
os.path.join(output_dir,'contamination_summary.txt'),
contamination_stats_header,
contamination_stats_dict,
corr_contaminant_dict)
if __name__ == "__main__":
main()<|fim▁end|> | |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>__author__ = 'marijn'
from setuptools import setup
<|fim▁hole|> license="AGPL3",
packages=['goal_notifier'],
requires=[
"google-api-python-client",
"pykka",
"pydub",
"pyopenssl",
],
scripts=["goal_notifier"]
)<|fim▁end|> | setup(
name="goal_notifier",
version="0.0.0", |
<|file_name|>locations.spec.ts<|end_file_name|><|fim▁begin|>'use strict';
import { Location } from '../../src/models/location';
describe('Location', () => {
let mockLocation: Location = new Location({
"id": 178,
"timestamp": "2018-04-09T16:17:26.464000-07:00",
"target": "d--0000-0000-0000-0532",
"lat": "37.406246",
"lon": "-122.109423",
"user": "kaylie"
});<|fim▁hole|> expect(mockLocation.timestamp).toEqual("2018-04-09T16:17:26.464000-07:00");
expect(mockLocation.lat).toBe("37.406246");
expect(mockLocation.lon).toBe("-122.109423");
expect(mockLocation.getPosition()).toEqual({ lat: 37.406246, lng: -122.109423 });
});
});<|fim▁end|> |
it('checks basic', () => {
expect(mockLocation.target).toBe("d--0000-0000-0000-0532"); |
<|file_name|>to_vec.rs<|end_file_name|><|fim▁begin|>// compile-flags: -O
#![crate_type = "lib"]
// CHECK-LABEL: @copy_to_vec<|fim▁hole|>}<|fim▁end|> | #[no_mangle]
fn copy_to_vec(s: &[u64]) -> Vec<u64> {
s.to_vec()
// CHECK: call void @llvm.memcpy |
<|file_name|>test_artificial_1024_Quantization_PolyTrend_30__20.py<|end_file_name|><|fim▁begin|>import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art<|fim▁hole|>
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 30, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 0);<|fim▁end|> | |
<|file_name|>GXDBWRITE.py<|end_file_name|><|fim▁begin|>### extends 'class_empty.py'
### block ClassImports
# NOTICE: Do not edit anything here, it is generated code
from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
from .GXDB import GXDB
from .GXVA import GXVA
from .GXVV import GXVV
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXDBWRITE(gxapi_cy.WrapDBWRITE):
"""
GXDBWRITE class.
The `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` class is used to open and write to databases. Large blocks of data
are split into blocks and served up sequentially to prevent the over-use of virtual memory when VVs or VAs are being written to channels.
Individual data blocks are limited by default to 1 MB (which is user-alterable). Data less than the block size
are served up whole, one block per line.
"""
def __init__(self, handle=0):
super(GXDBWRITE, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXDBWRITE <geosoft.gxapi.GXDBWRITE>`
:returns: A null `GXDBWRITE <geosoft.gxapi.GXDBWRITE>`
:rtype: GXDBWRITE
"""
return GXDBWRITE()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Create Methods
@classmethod
def create(cls, db):
"""
Create a `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object
Add channels using the `add_channel <geosoft.gxapi.GXDBWRITE.add_channel>` method.
:param db: Database input
:type db: GXDB
:returns: `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object
:rtype: GXDBWRITE
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapDBWRITE._create(GXContext._get_tls_geo(), db)
return GXDBWRITE(ret_val)
@classmethod
def create_xy(cls, db):
"""
Create a `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object for XY-located data. Add channels using the
`add_channel <geosoft.gxapi.GXDBWRITE.add_channel>` method.
:param db: Database input
:type db: GXDB
:returns: `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object
:rtype: GXDBWRITE
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapDBWRITE._create_xy(GXContext._get_tls_geo(), db)
return GXDBWRITE(ret_val)
@classmethod
def create_xyz(cls, db):
"""
Create a `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object for XYZ-located data.
Add channels using the `add_channel <geosoft.gxapi.GXDBWRITE.add_channel>` method.
:param db: Database input
:type db: GXDB<|fim▁hole|> :returns: `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object
:rtype: GXDBWRITE
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = gxapi_cy.WrapDBWRITE._create_xyz(GXContext._get_tls_geo(), db)
return GXDBWRITE(ret_val)
def add_channel(self, chan):
"""
Add a data channel to the `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object.
:param chan: Channel handle (does not need to be locked, but can be.)
:type chan: int
:returns: Channel index. Use for getting the correct `GXVV <geosoft.gxapi.GXVV>` or `GXVA <geosoft.gxapi.GXVA>` object.
:rtype: int
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._add_channel(chan)
return ret_val
# Data Access Methods
def get_db(self):
"""
Get the output `GXDB <geosoft.gxapi.GXDB>` handle from the `GXDBWRITE <geosoft.gxapi.GXDBWRITE>` object.
:returns: `GXDB <geosoft.gxapi.GXDB>` handle
:rtype: GXDB
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
ret_val = self._get_db()
return GXDB(ret_val)
def get_vv(self, chan):
"""
Get the `GXVV <geosoft.gxapi.GXVV>` handle for a channel.
:param chan: Index of channel to access.
:type chan: int
:returns: `GXVV <geosoft.gxapi.GXVV>` handle
:rtype: GXVV
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Call only for single-column (regular) channels. You can call the `get_chan_array_size <geosoft.gxapi.GXDBWRITE.get_chan_array_size>`
function to find the number of columns in a given channel. The `GXVV <geosoft.gxapi.GXVV>` is filled anew for each block served up.
"""
ret_val = self._get_vv(chan)
return GXVV(ret_val)
def get_va(self, chan):
"""
Get the `GXVA <geosoft.gxapi.GXVA>` handle for an array channel.
:param chan: Index of channel to access.
:type chan: int
:returns: `GXVA <geosoft.gxapi.GXVA>` handle
:rtype: GXVA
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Call only for array (multi-column) channels. You can call the `get_chan_array_size <geosoft.gxapi.GXDBWRITE.get_chan_array_size>`
function to find the number of columns in a given channel, or you can call `GXVA.col <geosoft.gxapi.GXVA.col>` on the returned `GXVA <geosoft.gxapi.GXVA>` handle.
The `GXVA <geosoft.gxapi.GXVA>` is filled anew for each block served up.
"""
ret_val = self._get_va(chan)
return GXVA(ret_val)
def get_v_vx(self):
"""
Get the X channel `GXVV <geosoft.gxapi.GXVV>` handle.
:returns: `GXVV <geosoft.gxapi.GXVV>` handle
:rtype: GXVV
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Only available for the CreateXY or CreateXYZ methods.
The `GXVV <geosoft.gxapi.GXVV>` is filled anew for each block served up.
"""
ret_val = self._get_v_vx()
return GXVV(ret_val)
def get_v_vy(self):
"""
Get the Y channel `GXVV <geosoft.gxapi.GXVV>` handle.
:returns: `GXVV <geosoft.gxapi.GXVV>` handle
:rtype: GXVV
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Only available for the CreateXY or CreateXYZ methods.
The `GXVV <geosoft.gxapi.GXVV>` is filled anew for each block served up.
"""
ret_val = self._get_v_vy()
return GXVV(ret_val)
def get_v_vz(self):
"""
Get the Z channel `GXVV <geosoft.gxapi.GXVV>` handle.
:returns: `GXVV <geosoft.gxapi.GXVV>` handle
:rtype: GXVV
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Only available for the CreateXY or CreateXYZ methods.
The `GXVV <geosoft.gxapi.GXVV>` is filled anew for each block served up.
If the Z channel is an array channel, the returned `GXVV <geosoft.gxapi.GXVV>` is the "base" `GXVV <geosoft.gxapi.GXVV>` of the `GXVA <geosoft.gxapi.GXVA>` and contains all items sequentially.
"""
ret_val = self._get_v_vz()
return GXVV(ret_val)
def get_chan_array_size(self, chan):
"""
Get the number of columns of data in a channel.
:param chan: Index of channel to access.
:type chan: int
:returns: The number of columns (array size) for a channel
:rtype: int
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Regular channels have one column of data. Array channels have more than one column of data.
This function should be called to determine whether to use `get_vv <geosoft.gxapi.GXDBWRITE.get_vv>` or `get_va <geosoft.gxapi.GXDBWRITE.get_va>` to access data
for a channel.
"""
ret_val = self._get_chan_array_size(chan)
return ret_val
# Processing
def add_block(self, line):
"""
Add the current block of data.
:param line: Line
:type line: int
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** First, set up the data for each channel by copying values into the individual channel VVs and VAs.
"""
self._add_block(line)
def commit(self):
"""
Commit remaining data to the database.
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
"""
self._commit()
def test_func(self, ra):
"""
Temporary test function.
:param ra: `GXRA <geosoft.gxapi.GXRA>` handle to text file to import.
:type ra: GXRA
.. versionadded:: 9.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Note:** Designed to import the "Massive.xyz" file, which has data in the format "X Y Z Data".
"""
self._test_func(ra)
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer<|fim▁end|> | |
<|file_name|>image.js<|end_file_name|><|fim▁begin|>function loadText()
{
var txtLang = document.getElementsByName("txtLang");
txtLang[0].innerHTML = "K\u00E4lla";
txtLang[1].innerHTML = "Alternativ text";
txtLang[2].innerHTML = "Mellanrum";
txtLang[3].innerHTML = "Placering";
txtLang[4].innerHTML = "\u00D6verst";
txtLang[5].innerHTML = "Bildram";
txtLang[6].innerHTML = "Nederst";
txtLang[7].innerHTML = "Bredd";
txtLang[8].innerHTML = "V\u00E4nster";
txtLang[9].innerHTML = "H\u00F6jd";
txtLang[10].innerHTML = "H\u00F6ger";
var optLang = document.getElementsByName("optLang");
optLang[0].text = "Abs. nederst";
optLang[1].text = "Abs. mitten";
optLang[2].text = "Baslinje";<|fim▁hole|> optLang[6].text = "H\u00F6ger";
optLang[7].text = "Text-topp";
optLang[8].text = "\u00D6verst";
document.getElementById("btnBorder").value = " Kantlinje ";
document.getElementById("btnReset").value = "\u00C5terst\u00E4ll";
document.getElementById("btnCancel").value = "Avbryt";
document.getElementById("btnInsert").value = "Infoga";
document.getElementById("btnApply").value = "Verkst\u00E4ll";
document.getElementById("btnOk").value = " OK ";
}
function writeTitle()
{
document.write("<title>Bild</title>")
}<|fim▁end|> | optLang[3].text = "Nederst";
optLang[4].text = "V\u00E4nster";
optLang[5].text = "Mitten"; |
<|file_name|>PlaceOrderController.java<|end_file_name|><|fim▁begin|>package br.copacabana;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Level;
import javax.cache.Cache;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.springframework.web.servlet.ModelAndView;
import br.com.copacabana.cb.entities.Address;
import br.com.copacabana.cb.entities.Client;
import br.com.copacabana.cb.entities.MealOrder;
import br.com.copacabana.cb.entities.OrderedPlate;
import br.com.copacabana.cb.entities.Plate;
import br.com.copacabana.cb.entities.Restaurant;
import br.com.copacabana.cb.entities.TurnType;
import br.com.copacabana.cb.entities.WorkingHours.DayOfWeek;
import br.copacabana.order.paypal.PayPalProperties.PayPalConfKeys;
import br.copacabana.spring.AddressManager;
import br.copacabana.spring.ClientManager;
import br.copacabana.spring.ConfigurationManager;
import br.copacabana.spring.PlateManager;
import br.copacabana.spring.RestaurantManager;
import br.copacabana.usecase.control.UserActionManager;
import br.copacabana.util.TimeController;
import com.google.appengine.api.datastore.Key;
import com.google.appengine.api.datastore.KeyFactory;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.JsonPrimitive;
/**
* @author Rafael Coutinho
*/
public class PlaceOrderController extends JsonViewController {
private String formView;
private String successView;
@Override
protected ModelAndView handleRequestInternal(HttpServletRequest request, HttpServletResponse response) throws Exception {
Map<String, Object> model = new HashMap<String, Object>();
model.put("mode", "view");
try {
Cache cache = CacheController.getCache();
if (cache.get(PayPalConfKeys.pppFixedRate.name()) == null) {
ConfigurationManager cm = new ConfigurationManager();
cache.put(PayPalConfKeys.pppFixedRate.name(), cm.getConfigurationValue(PayPalConfKeys.pppFixedRate.name()));
cache.put(PayPalConfKeys.pppPercentageValue.name(), cm.getConfigurationValue(PayPalConfKeys.pppPercentageValue.name()));
}
if (!Authentication.isUserLoggedIn(request.getSession())) {
String orderData = request.getParameter("orderData");
request.getSession().setAttribute("orderData", orderData);
model.put("forwardUrl", "/continueOrder.jsp");
UserActionManager.startOrderNotLogged(orderData, request.getSession().getId());
return new ModelAndView(getFormView(), model);
} else {
String orderData = "";
JsonObject user = Authentication.getLoggedUser(request.getSession());
String loggedUserId = user.get("entity").getAsJsonObject().get("id").getAsString();
if (request.getParameter("orderData") == null) {
orderData = (String) request.getSession().getAttribute("orderData");
} else {
orderData = request.getParameter("orderData");
}
log.log(Level.INFO, "OrderJSon: {0}", orderData);
JsonParser pa = new JsonParser();
JsonObject orderDataJson = (JsonObject) pa.parse(orderData);
ClientManager cman = new ClientManager();
Client c = cman.find(KeyFactory.stringToKey(loggedUserId), Client.class);
MealOrder mo = getMealOrder(c, orderDataJson);
request.getSession().setAttribute("clientPhone", "");
DateSerializer dateSerializer = new DateSerializer(request);
DateDeSerializer dateDeSerializer = new DateDeSerializer(request);
GsonBuilder gsonBuilder = GsonBuilderFactory.getInstance();// new
// GsonBuilder().setPrettyPrinting().serializeNulls().excludeFieldsWithoutExposeAnnotation();
gsonBuilder.registerTypeAdapter(Date.class, dateSerializer);
gsonBuilder.registerTypeAdapter(Date.class, dateDeSerializer);
gsonBuilder.registerTypeAdapter(Key.class, new KeyDeSerializer());
gsonBuilder.registerTypeAdapter(Key.class, new KeySerializer());
Gson gson = gsonBuilder.create();
model.putAll(updateModelData(mo, c, gson));
String json = gson.toJson(mo); // Or use new
json = GsonBuilderFactory.escapeString(json);
request.getSession().setAttribute("orderData", json);
UserActionManager.startOrder(json, loggedUserId, request.getSession().getId());
return new ModelAndView(getSuccessView(), model);
}
} catch (Exception e) {
log.log(Level.SEVERE, "Failed to place order.");
try {
String orderData = "";
log.log(Level.SEVERE, "Checking logged user.");
JsonObject user = Authentication.getLoggedUser(request.getSession());
if (user == null) {
log.log(Level.SEVERE, "user is not logged in.");
}
String loggedUserId = user.get("entity").getAsJsonObject().get("id").getAsString();
log.log(Level.SEVERE, "logged user id {0}", loggedUserId);
if (request.getParameter("orderData") == null) {
log.log(Level.SEVERE, "Order is not in request, checking session");
orderData = (String) request.getSession().getAttribute("orderData");
} else {
log.log(Level.SEVERE, "Order is in request");
orderData = request.getParameter("orderData");
}
if (orderData == null) {
log.log(Level.SEVERE, "Order was null!");
}
log.log(Level.SEVERE, "Order is order :" + orderData);
log.log(Level.SEVERE, "Exception was {0}.", e);
log.log(Level.SEVERE, "Error was {0}.", e.getMessage());
UserActionManager.registerMajorError(request, e, loggedUserId, request.getSession().getId(), "placing order");
} catch (Exception ex) {
log.log(Level.SEVERE, "Failed during loggin of error was {0}.", e);
UserActionManager.registerMajorError(request, e, "placing order 2");
}
throw e;
}
}
public static Map<String, Object> updateModelData(MealOrder mo, Client c, Gson gson) {
Map<String, Object> model = new HashMap<String, Object>();
RestaurantManager rman = new RestaurantManager();
Restaurant r = rman.getRestaurant(mo.getRestaurant());
Boolean b = r.getOnlyForRetrieval();
if (b != null && true == b) {
model.put("onlyForRetrieval", Boolean.TRUE);
} else {
model.put("onlyForRetrieval", Boolean.FALSE);
}
model.put("restaurantAddressKey", KeyFactory.keyToString(r.getAddress()));
model.put("clientCpf", c.getCpf());
model.put("level", c.getLevel().ordinal());
JsonObject json = new JsonObject();
ConfigurationManager cm = new ConfigurationManager();
String hasSpecificLogic = cm.getConfigurationValue("hasSpecificLogic");
model.put("noTakeAwayOrders", "false");
if (hasSpecificLogic != null && hasSpecificLogic.endsWith("true")) {
json = getSteakHouseSpecificData(mo, c, gson);
getMakisSpecificLogic(mo, c, gson, json);
getPapagaiosSpecificLogic(mo, c, gson, json);
getPizzadoroSpecificLogic(mo,c,gson,json);
if (noTakeAwayOrders(mo) == true) {
model.put("noTakeAwayOrders", "true");
}
}
model.put("hasSpecificLogic", json.toString());
if (json.get("javascript") != null && json.get("javascript").getAsString().length() > 0) {
model.put("hasSpecificLogicJavascript", json.get("javascript").getAsString());
}
Address restAddress = new AddressManager().getAddress(r.getAddress());
model.put("restaurantAddress", gson.toJson(restAddress));
return model;
}
private static boolean noTakeAwayOrders(MealOrder mo) {
ConfigurationManager cm = new ConfigurationManager();
String ids = cm.getConfigurationValue("no.takeaway.ids");
String restId = KeyFactory.keyToString(mo.getRestaurant());
if (ids.contains(restId)) {
return true;
}
return false;
}
private static void getPapagaiosSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
ConfigurationManager cm = new ConfigurationManager();
String idStr = cm.getConfigurationValue("papagaios.id");
if (idStr != null && idStr.length() > 0) {
Key k = KeyFactory.stringToKey(idStr);
if (k.equals(mo.getRestaurant())) {
json.add("javascript", new JsonPrimitive("/scripts/custom/papagaios.js"));
}
}
}
private static void getPizzadoroSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
ConfigurationManager cm = new ConfigurationManager();
String idStr = cm.getConfigurationValue("pizzadoro.id");
if (idStr != null && idStr.length() > 0) {
Key k = KeyFactory.stringToKey(idStr);
if (k.equals(mo.getRestaurant())) {
json.add("javascript", new JsonPrimitive("/scripts/custom/pizzadoro.js"));
}
}
}
private static void getMakisSpecificLogic(MealOrder mo, Client c, Gson gson, JsonObject json) {
try {
ConfigurationManager cm = new ConfigurationManager();
PlateManager pm = new PlateManager();
String makisIdStr = cm.getConfigurationValue("makis.Id");
if (makisIdStr != null && makisIdStr.length() > 0) {
Key makis = KeyFactory.stringToKey(makisIdStr);
if (makis != null && makis.equals(mo.getRestaurant())) {
String packageId = cm.getConfigurationValue("makis.package.id");
if (packageId != null && packageId.length() > 0) {
json.add("makisPackageCostId", new JsonPrimitive(packageId));
json.add("makisMsg", new JsonPrimitive(cm.getConfigurationValue("makis.msg")));
boolean isIncluded = false;
Key packageKey = KeyFactory.stringToKey(packageId);
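// check whether the packaging charge is already part of the order; if not, append one unit of it below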
for (Iterator<OrderedPlate> iterator = mo.getPlates().iterator(); iterator.hasNext();) {
OrderedPlate plate = (OrderedPlate) iterator.next();
if (Boolean.FALSE.equals(plate.getIsFraction()) && plate.getPlate().equals(packageKey)) {
isIncluded = true;
break;
}
}
if (isIncluded == false) {
Plate packagePlate = pm.get(packageKey);
OrderedPlate oplate = new OrderedPlate();
oplate.setName(packagePlate.getName());
oplate.setPrice(packagePlate.getPrice());
oplate.setPriceInCents(packagePlate.getPriceInCents());
oplate.setQty(1);
oplate.setPlate(packageKey);
mo.getPlates().add(oplate);
}
}
}
}
} catch (Exception e) {
log.log(Level.SEVERE, "failed to add makis specific logic", e);
}
}
private static JsonObject getSteakHouseSpecificData(MealOrder mo, Client c, Gson gson) {
JsonObject json = new JsonObject();
json.add("freeDelivery", new JsonPrimitive("false"));
try {
ConfigurationManager cm = new ConfigurationManager();
String steakIdStr = cm.getConfigurationValue("steakHouse.Id");
if (steakIdStr != null && steakIdStr.length() > 0) {
Key steak = KeyFactory.stringToKey(steakIdStr);
if (steak.equals(mo.getRestaurant())) {
if (!TimeController.getDayOfWeek().equals(DayOfWeek.SATURDAY) && !TimeController.getDayOfWeek().equals(DayOfWeek.SUNDAY)) {
if (TimeController.getCurrentTurn().equals(TurnType.LUNCH)) {
String foodCatsStr = cm.getConfigurationValue("steakHouse.FoodCats");
if (foodCatsStr != null && foodCatsStr.length() > 0) {
String[] foodCatsArray = foodCatsStr.split("\\|");
Set<Key> foodCats = new HashSet<Key>();
for (int i = 0; i < foodCatsArray.length; i++) {
if (foodCatsArray[i].length() > 0) {
foodCats.add(KeyFactory.stringToKey(foodCatsArray[i]));
}
}
List<OrderedPlate> plates = mo.getPlates();
PlateManager pm = new PlateManager();
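// free delivery applies only when every ordered plate falls into one of the configured food categories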
for (Iterator iterator = plates.iterator(); iterator.hasNext();) {
OrderedPlate orderedPlate = (OrderedPlate) iterator.next();
Plate p = null;
if (Boolean.TRUE.equals(orderedPlate.getIsFraction())) {
p = pm.getPlate(orderedPlate.getFractionPlates().iterator().next());
} else {
p = pm.getPlate(orderedPlate.getPlate());
}
if (!foodCats.contains(p.getFoodCategory())) {
json.add("freeDelivery", new JsonPrimitive("false"));
return json;
}
}
json.add("freeDelivery", new JsonPrimitive("true"));
json.add("msg", new JsonPrimitive(cm.getConfigurationValue("steakHouse.msg")));
}
}
}
}
}
} catch (Exception e) {
log.log(Level.SEVERE, "Could not set up things for SteakHouse", e);
}
return json;
}
public MealOrder getMealOrder(Client c, JsonObject sessionOderData) {
MealOrder mo = new MealOrder();
mo.setClient(c);
if (c.getContact() != null) {
mo.setClientPhone(c.getContact().getPhone());
}
mo.setAddress(getAddress(sessionOderData, c));
mo.setObservation(getObservation(sessionOderData));
mo.setRestaurant(getRestKey(sessionOderData));
mo.setPlates(getPlates(sessionOderData));
return mo;
}
private Key getAddress(JsonObject sessionOderData, Client c) {
try {
if (sessionOderData.get("address") == null) {
if (c.getMainAddress() != null) {
return c.getMainAddress();
} else {
return null;
}
} else {
if (sessionOderData.get("address") != null && !sessionOderData.get("address").isJsonNull() ) {
return KeyFactory.stringToKey(sessionOderData.get("address").getAsString());
}else{
return null;
}
}
} catch (Exception e) {
log.log(Level.SEVERE, "no address da sessão havia {0}", sessionOderData.get("address"));
log.log(Level.SEVERE, "Error ao buscar endereço de cliente ou em sessão", e);
return null;
}
}
public List<OrderedPlate> getPlates(JsonObject sessionOderData) {
List<OrderedPlate> orderedPlates = new ArrayList<OrderedPlate>();
JsonArray array = sessionOderData.get("plates").getAsJsonArray();
for (int i = 0; i < array.size(); i++) {
JsonObject pjson = array.get(i).getAsJsonObject();
orderedPlates.add(getOrdered(pjson));
}
return orderedPlates;
}
private OrderedPlate getOrdered(JsonObject pjson) {
OrderedPlate oplate = new OrderedPlate();
oplate.setName(pjson.get("name").getAsString());
oplate.setPrice(pjson.get("price").getAsDouble());
oplate.setPriceInCents(Double.valueOf(pjson.get("price").getAsDouble() * 100.0).intValue());
oplate.setQty(pjson.get("qty").getAsInt());
if (pjson.get("isFraction").getAsBoolean() == true) {
oplate.setIsFraction(Boolean.TRUE);
Set<Key> fractionPlates = new HashSet<Key>();
JsonArray fractionKeys = pjson.get("fractionKeys").getAsJsonArray();
for (int i = 0; i < fractionKeys.size(); i++) {
Key fractionKey = KeyFactory.stringToKey(fractionKeys.get(i).getAsString());
fractionPlates.add(fractionKey);
}
oplate.setFractionPlates(fractionPlates);
return oplate;
} else {
String pkey = "";
if (pjson.get("plate").isJsonObject()) {
pkey = pjson.get("plate").getAsJsonObject().get("id").getAsString();
} else {
pkey = pjson.get("plate").getAsString();
}
oplate.setPlate(KeyFactory.stringToKey(pkey));
return oplate;
}
}
public Key getRestKey(JsonObject sessionOderData) {
String restKey;
if (sessionOderData.get("restaurant") != null) {
if (sessionOderData.get("restaurant").isJsonObject()) {
restKey = sessionOderData.get("restaurant").getAsJsonObject().get("id").getAsString();
} else {
restKey = sessionOderData.get("restaurant").getAsString();
}
} else {
restKey = sessionOderData.get("plates").getAsJsonArray().get(0).getAsJsonObject().get("plate").getAsJsonObject().get("value").getAsJsonObject().get("restaurant").getAsString();
}
return KeyFactory.stringToKey(restKey);
}
public String getObservation(JsonObject sessionOderData) {
return sessionOderData.get("observation").getAsString();
}
public String getFormView() {
<|fim▁hole|> public void setFormView(String formView) {
this.formView = formView;
}
public String getSuccessView() {
return successView;
}
public void setSuccessView(String successView) {
this.successView = successView;
}
}<|fim▁end|> | return formView;
}
|
<|file_name|>folders.ts<|end_file_name|><|fim▁begin|>import $ from 'cafy'; import ID from '../../../../misc/cafy-id';
import DriveFolder, { pack } from '../../../../models/drive-folder';
import { ILocalUser } from '../../../../models/user';
export const meta = {
desc: {
ja: 'ドライブのフォルダ一覧を取得します。',
en: 'Get folders of drive.'
},
requireCredential: true,
kind: 'drive-read'
};
export default (params: any, user: ILocalUser) => new Promise(async (res, rej) => {
// Get 'limit' parameter
const [limit = 10, limitErr] = $.num.optional.range(1, 100).get(params.limit);
if (limitErr) return rej('invalid limit param');
// Get 'sinceId' parameter
const [sinceId, sinceIdErr] = $.type(ID).optional.get(params.sinceId);
if (sinceIdErr) return rej('invalid sinceId param');
// Get 'untilId' parameter
const [untilId, untilIdErr] = $.type(ID).optional.get(params.untilId);
if (untilIdErr) return rej('invalid untilId param');<|fim▁hole|>
// Check if both sinceId and untilId are specified
if (sinceId && untilId) {
return rej('cannot set sinceId and untilId');
}
// Get 'folderId' parameter
const [folderId = null, folderIdErr] = $.type(ID).optional.nullable.get(params.folderId);
if (folderIdErr) return rej('invalid folderId param');
// Construct query
const sort = {
_id: -1
};
const query = {
userId: user._id,
parentId: folderId
} as any;
if (sinceId) {
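// paginating forward: flip the sort and fetch items newer than sinceId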
sort._id = 1;
query._id = {
$gt: sinceId
};
} else if (untilId) {
query._id = {
$lt: untilId
};
}
// Issue query
const folders = await DriveFolder
.find(query, {
limit: limit,
sort: sort
});
// Serialize
res(await Promise.all(folders.map(async folder =>
await pack(folder))));
});<|fim▁end|> | |
<|file_name|>workarounds.cpp<|end_file_name|><|fim▁begin|>/****************************************************************************
**
** Copyright (C) 2016 The Qt Company Ltd.
** Contact: https://www.qt.io/licensing/
**
** This file is part of Qt Creator.
**
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and The Qt Company. For licensing terms
** and conditions see https://www.qt.io/terms-conditions. For further
** information use the contact form at https://www.qt.io/contact-us.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 3 as published by the Free Software
** Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
** included in the packaging of this file. Please review the following
** information to ensure the GNU General Public License requirements will
** be met: https://www.gnu.org/licenses/gpl-3.0.html.
**
****************************************************************************/
#include "workarounds.h"
#include <QPalette>
#include <utils/stylehelper.h>
QPalette panelPalette(const QPalette &oldPalette, bool lightColored)
{
QColor color = Utils::StyleHelper::panelTextColor(lightColored);
QPalette pal = oldPalette;
pal.setBrush(QPalette::All, QPalette::WindowText, color);
pal.setBrush(QPalette::All, QPalette::ButtonText, color);
pal.setBrush(QPalette::All, QPalette::Foreground, color);
color.setAlpha(100);
pal.setBrush(QPalette::Disabled, QPalette::WindowText, color);
pal.setBrush(QPalette::Disabled, QPalette::ButtonText, color);
pal.setBrush(QPalette::Disabled, QPalette::Foreground, color);<|fim▁hole|> return pal;
}<|fim▁end|> | |
<|file_name|>tranzdump.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import os
import os.path
import re
import sys
import string
from django.apps.registry import apps
from django.core.management.base import BaseCommand, CommandError
from python_translate.extractors import base as extractors
from python_translate import operations
from python_translate.translations import MessageCatalogue
import django_translate
from django_translate.utils import bcolors
from django_translate import services
from django_translate import settings
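# Helpers for probing str.format templates: AnyFormatSpec renders as an empty
# string for any format spec, and Formatter records which placeholder keys a
# template actually references.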
class AnyFormatSpec:
def __format__(self, fmt):
return ''
class Formatter(string.Formatter):
def __init__(self):
self.used = set()
<|fim▁hole|> return AnyFormatSpec()
class Command(BaseCommand):
help = """Extract translation strings from templates from a given location. It can display them or merge
the new ones into the translation files. When new translation strings are found it can
automatically add a prefix to the translation message.
Example running against app folder
./manage.py tranzdump -l en --path ./ --output-path ./tranz
./manage.py tranzdump -l fr --force --prefix="new_" --app website --exclude ./website/static
"""
def __init__(self, stdout=None, stderr=None, no_color=False):
self.excluded_paths = None
self.locale = None
self.verbosity = None
super(Command, self).__init__(stdout, stderr, no_color)
def add_arguments(self, parser):
parser.add_argument('--locale', '-l', default='en', dest='locale', action='store',
help='Locale to process')
parser.add_argument('--app', '-a', dest='app', action='store',
help='App to scan.')
parser.add_argument('--path', '-p', dest='path', action='store',
help='Path to scan')
parser.add_argument('--output-dir', dest='output_dir', default=None, action='store',
help='Override the default output dir')
parser.add_argument('--exclude-dir', '-x', default=[], dest='excluded_paths', action='append',
help='Paths to exclude. Default is none. Can be used multiple times. '
'Works only with ChainExtractor.')
parser.add_argument('--prefix', dest='prefix', default="__", action='store',
help='Override the default prefix')
parser.add_argument('--format', dest='format', default="yml", action='store',
help='Override the default output format')
parser.add_argument('--dump-messages', dest='dump_messages', action='store_true',
help='Should the messages be dumped in the console')
parser.add_argument('--force', dest='force', action='store_true',
help='Should the update be done')
parser.add_argument('--no-backup', dest='no_backup', action='store_true',
help='Should backup be disabled')
parser.add_argument('--clean', dest='clean', default=False, action='store_true',
help='Should clean not found messages',)
def handle(self, *args, **options):
if options.get('force') != True and options.get('dump_messages') != True:
print((bcolors.WARNING + 'You must choose at least one of --force or --dump-messages' + bcolors.ENDC))
return
if not (bool(options.get('app')) ^ bool(options.get('path'))):
print((bcolors.WARNING + 'You must choose only one of --app or --path' + bcolors.ENDC))
return
if not options.get('output_dir') and (not options.get('app') or not settings.TRANZ_SEARCH_LOCALE_IN_APPS):
print((bcolors.WARNING + 'You must provide an --output-dir when in --path mode, or when TRANZ_SEARCH_LOCALE_IN_APPS ' \
'settings variable is False.' + bcolors.ENDC))
return
self.excluded_paths = [os.path.abspath(path) for path in options['excluded_paths']]
self.excluded_paths += [os.path.abspath(django_translate.__path__[0])]
self.excluded_paths += settings.TRANZ_EXCLUDED_DIRS
# Find directories to scan
if options.get('app'):
for app in list(apps.app_configs.values()):
if app.name == options.get('app'):
current_name = app.name
root_path = app.path
break
else:
raise ValueError("App {0} not found".format(options.get('app')))
else:
root_path = os.path.abspath(options['path'])
current_name = root_path.split("/")[-1]
output_dir = options.get('output_dir') or os.path.join(root_path, 'tranz')
writer = services.writer
print(('Generating "{0}" translation files for "{1}"'.format(options.get('locale'), current_name)))
print("Loading existing messages")
current_catalogue = MessageCatalogue(options['locale'])
loader = services.loader
loader.load_messages(output_dir, current_catalogue)
if len(current_catalogue.messages) == 0:
print(("No messages were loaded, make sure there actually are " \
"translation file in format {{catalog}}.{{locale}}.{{format}} in {0}".format(output_dir)))
return
print("Extracting messages")
extracted_catalogue = MessageCatalogue(options['locale'])
extractor = services.extractor
extractor.set_prefix(options['prefix'])
self.extract_messages(extractor, root_path, extracted_catalogue)
print("Processing catalogues")
operation_class = operations.DiffOperation if options['clean'] else operations.MergeOperation
operation = operation_class(current_catalogue, extracted_catalogue)
if not len(operation.get_domains()):
print("No translations found")
return
if options["dump_messages"]:
for domain in operation.get_domains():
print(("Displaying messages for domain {0}".format(domain)))
new_keys = list(operation.get_new_messages(domain).keys())
all_keys = list(operation.get_messages(domain).keys())
for id in set(all_keys).difference(new_keys):
print(id)
for id in new_keys:
print((bcolors.OKGREEN + id + bcolors.ENDC))
for id in list(operation.get_obsolete_messages(domain).keys()):
print((bcolors.FAIL + id + bcolors.ENDC))
if options["no_backup"]:
writer.disable_backup()
if options["force"]:
print(("Writing files to {0}".format(output_dir)))
writer.write_translations(operation.get_result(), options['format'], {
"path": output_dir,
"default_locale": options['locale']
})
def extract_messages(self, extractor, root_path, extracted_catalogue):
if isinstance(extractor, extractors.ChainExtractor):
subextractors = list(extractor._extractors.values())
else:
subextractors = [extractor]
for subextractor in subextractors:
if not isinstance(subextractor, extractors.BaseExtractor):
subextractor.extract(root_path, extracted_catalogue)
continue
paths = subextractor.extract_files(root_path)
paths = self.filter_exluded_paths(paths)
for path in paths:
try:
subextractor.extract([path], extracted_catalogue)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = 'There was an exception in extractor {0} when processing ' \
'resource "{1}"'.format(type(subextractor).__name__, path)
msg = msg + "\nOriginal message: {0} {1}".format(exc_type.__name__, exc_value)
raise ValueError(msg).with_traceback(exc_traceback)
def filter_exluded_paths(self, paths):
valid = []
for path in paths:
for excluded in self.excluded_paths:
if path.startswith(excluded):
break
else:
valid.append(path)
return valid<|fim▁end|> | def get_value(self, key, args, kwargs):
self.used.add(key) |
<|file_name|>debug.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012 Dag Wieers <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: debug
short_description: Print statements during execution
description:
- This module prints statements during execution and can be useful
for debugging variables or expressions without necessarily halting
the playbook.
- Useful for debugging together with the 'when:' directive.
- This module is also supported for Windows targets.
version_added: '0.8'
options:
msg:
description:
- The customized message that is printed. If omitted, prints a generic message.
type: str
default: 'Hello world!'
var:
description:
- A variable name to debug.
- Mutually exclusive with the C(msg) option.
- Be aware that this option already runs in Jinja2 context and has an implicit C({{ }}) wrapping,
so you should not be using Jinja2 delimiters unless you are looking for double interpolation.
type: str
verbosity:
description:
- A number that controls when the debug is run, if you set to 3 it will only run debug when -vvv or above
type: int
default: 0<|fim▁hole|> version_added: '2.1'
notes:
- This module is also supported for Windows targets.
seealso:
- module: ansible.builtin.assert
- module: ansible.builtin.fail
author:
- Dag Wieers (@dagwieers)
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Print the gateway for each host when defined
ansible.builtin.debug:
msg: System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}
when: ansible_default_ipv4.gateway is defined
- name: Get uptime information
ansible.builtin.shell: /usr/bin/uptime
register: result
- name: Print return information from the previous task
ansible.builtin.debug:
var: result
verbosity: 2
- name: Display all variables/facts known for a host
ansible.builtin.debug:
var: hostvars[inventory_hostname]
verbosity: 4
- name: Prints two lines of messages, but only if there is an environment value set
ansible.builtin.debug:
msg:
- "Provisioning based on YOUR_KEY which is: {{ lookup('env', 'YOUR_KEY') }}"
- "These servers were built using the password of '{{ password_used }}'. Please retain this for later use."
'''<|fim▁end|> | |
<|file_name|>test_resource_owner_password.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import json
from unittest import mock
from oauthlib.common import Request
from oauthlib.oauth2.rfc6749 import errors
from oauthlib.oauth2.rfc6749.grant_types import (
ResourceOwnerPasswordCredentialsGrant,
)
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from tests.unittest import TestCase
class ResourceOwnerPasswordCredentialsGrantTest(TestCase):
def setUp(self):
mock_client = mock.MagicMock()
mock_client.user.return_value = 'mocked user'
self.request = Request('http://a.b/path')
self.request.grant_type = 'password'
self.request.username = 'john'
self.request.password = 'doe'
self.request.client = mock_client
self.request.scopes = ('mocked', 'scopes')
self.mock_validator = mock.MagicMock()
self.auth = ResourceOwnerPasswordCredentialsGrant(
request_validator=self.mock_validator)
def set_client(self, request, *args, **kwargs):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def test_create_token_response(self):
bearer = BearerToken(self.mock_validator)
headers, body, status_code = self.auth.create_token_response(
self.request, bearer)
token = json.loads(body)
self.assertEqual(self.mock_validator.save_token.call_count, 1)
self.assertIn('access_token', token)
self.assertIn('token_type', token)
self.assertIn('expires_in', token)
self.assertIn('refresh_token', token)
# ensure client_authentication_required() is properly called
self.mock_validator.client_authentication_required.assert_called_once_with(self.request)
# fail client authentication
self.mock_validator.reset_mock()
self.mock_validator.validate_user.return_value = True
self.mock_validator.authenticate_client.return_value = False
status_code = self.auth.create_token_response(self.request, bearer)[2]
self.assertEqual(status_code, 401)
self.assertEqual(self.mock_validator.save_token.call_count, 0)
# mock client_authentication_required() returning False then fail
self.mock_validator.reset_mock()
self.mock_validator.client_authentication_required.return_value = False
self.mock_validator.authenticate_client_id.return_value = False
status_code = self.auth.create_token_response(self.request, bearer)[2]
self.assertEqual(status_code, 401)
self.assertEqual(self.mock_validator.save_token.call_count, 0)
def test_create_token_response_without_refresh_token(self):
# self.auth.refresh_token = False so we don't generate a refresh token
self.auth = ResourceOwnerPasswordCredentialsGrant(
request_validator=self.mock_validator, refresh_token=False)
bearer = BearerToken(self.mock_validator)
headers, body, status_code = self.auth.create_token_response(
self.request, bearer)
token = json.loads(body)
self.assertEqual(self.mock_validator.save_token.call_count, 1)
self.assertIn('access_token', token)
self.assertIn('token_type', token)
self.assertIn('expires_in', token)
# ensure no refresh token is generated
self.assertNotIn('refresh_token', token)
# ensure client_authentication_required() is properly called
self.mock_validator.client_authentication_required.assert_called_once_with(self.request)
# fail client authentication
self.mock_validator.reset_mock()
self.mock_validator.validate_user.return_value = True
self.mock_validator.authenticate_client.return_value = False
status_code = self.auth.create_token_response(self.request, bearer)[2]
self.assertEqual(status_code, 401)
self.assertEqual(self.mock_validator.save_token.call_count, 0)
# mock client_authentication_required() returning False then fail
self.mock_validator.reset_mock()
self.mock_validator.client_authentication_required.return_value = False
self.mock_validator.authenticate_client_id.return_value = False
status_code = self.auth.create_token_response(self.request, bearer)[2]
self.assertEqual(status_code, 401)
self.assertEqual(self.mock_validator.save_token.call_count, 0)
def test_custom_auth_validators_unsupported(self):
authval1, authval2 = mock.Mock(), mock.Mock()
expected = ('ResourceOwnerPasswordCredentialsGrant does not '
'support authorization validators. Use token '
'validators instead.')
with self.assertRaises(ValueError) as caught:
ResourceOwnerPasswordCredentialsGrant(self.mock_validator,
pre_auth=[authval1])
self.assertEqual(caught.exception.args[0], expected)
with self.assertRaises(ValueError) as caught:
ResourceOwnerPasswordCredentialsGrant(self.mock_validator,
post_auth=[authval2])
self.assertEqual(caught.exception.args[0], expected)
with self.assertRaises(AttributeError):
self.auth.custom_validators.pre_auth.append(authval1)
with self.assertRaises(AttributeError):
self.auth.custom_validators.pre_auth.append(authval2)
def test_custom_token_validators(self):<|fim▁hole|> self.auth.custom_validators.post_token.append(tknval2)
bearer = BearerToken(self.mock_validator)
self.auth.create_token_response(self.request, bearer)
self.assertTrue(tknval1.called)
self.assertTrue(tknval2.called)
def test_error_response(self):
pass
def test_scopes(self):
pass
def test_invalid_request_missing_params(self):
del self.request.grant_type
self.assertRaises(errors.InvalidRequestError, self.auth.validate_token_request,
self.request)
def test_invalid_request_duplicates(self):
request = mock.MagicMock(wraps=self.request)
request.duplicate_params = ['scope']
self.assertRaises(errors.InvalidRequestError, self.auth.validate_token_request,
request)
def test_invalid_grant_type(self):
self.request.grant_type = 'foo'
self.assertRaises(errors.UnsupportedGrantTypeError,
self.auth.validate_token_request, self.request)
def test_invalid_user(self):
self.mock_validator.validate_user.return_value = False
self.assertRaises(errors.InvalidGrantError, self.auth.validate_token_request,
self.request)
def test_client_id_missing(self):
del self.request.client.client_id
self.assertRaises(NotImplementedError, self.auth.validate_token_request,
self.request)
def test_valid_token_request(self):
self.mock_validator.validate_grant_type.return_value = True
self.auth.validate_token_request(self.request)<|fim▁end|> | tknval1, tknval2 = mock.Mock(), mock.Mock()
self.auth.custom_validators.pre_token.append(tknval1) |
<|file_name|>checkHistoryList.js<|end_file_name|><|fim▁begin|>import { injectReducer } from 'STORE/reducers'
export default store => ({<|fim▁hole|> require.ensure([], (require) => {
const CheckHistoryList = require('VIEW/CheckHistoryList').default
const reducer = require('REDUCER/checkHistoryList').default
injectReducer(store, { key: 'checkHistoryList', reducer })
cb(null, CheckHistoryList)
}, 'checkHistoryList')
}
})<|fim▁end|> | path : 'checkHistoryList.html',
getComponent(nextState, cb) { |
<|file_name|>0002_add_subscription.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-19 19:08
from __future__ import unicode_literals
import channels.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),<|fim▁hole|> ]
operations = [
migrations.CreateModel(
name="Subscription",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("created_on", models.DateTimeField(auto_now_add=True)),
("updated_on", models.DateTimeField(auto_now=True)),
("post_id", channels.models.Base36IntegerField()),
("comment_id", channels.models.Base36IntegerField(null=True)),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.AlterUniqueTogether(
name="subscription",
unique_together=set([("user", "post_id", "comment_id")]),
),
migrations.AlterIndexTogether(
name="subscription", index_together=set([("post_id", "comment_id")])
),
]<|fim▁end|> | ("channels", "0001_add_tokens"), |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls.defaults import *
<|fim▁hole|>from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^project/', include('project.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
(r'^admin/(.*)', admin.site.root),
)<|fim▁end|> | # Uncomment the next two lines to enable the admin: |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""MNE software for MEG and EEG data analysis."""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.devN' where N is an integer.
#
from ._version import __version__
# have to import verbose first since it's needed by many things
from .utils import (set_log_level, set_log_file, verbose, set_config,
get_config, get_config_path, set_cache_dir,
set_memmap_min_size, grand_average, sys_info, open_docs)
from .io.pick import (pick_types, pick_channels,
pick_channels_regexp, pick_channels_forward,
pick_types_forward, pick_channels_cov,
pick_channels_evoked, pick_info,
channel_type, channel_indices_by_type)
from .io.base import concatenate_raws
from .io.meas_info import create_info, Info
from .io.proj import Projection
from .io.kit import read_epochs_kit
from .io.eeglab import read_epochs_eeglab
from .io.reference import (set_eeg_reference, set_bipolar_reference,
add_reference_channels)
from .io.what import what
from .bem import (make_sphere_model, make_bem_model, make_bem_solution,
read_bem_surfaces, write_bem_surfaces, write_head_bem,
read_bem_solution, write_bem_solution)
from .cov import (read_cov, write_cov, Covariance, compute_raw_covariance,
compute_covariance, whiten_evoked, make_ad_hoc_cov)
from .event import (read_events, write_events, find_events, merge_events,
pick_events, make_fixed_length_events, concatenate_events,
find_stim_steps, AcqParserFIF)
from .forward import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, Forward,
write_forward_solution, make_forward_solution,
convert_forward_solution, make_field_map,
make_forward_dipole, use_coil_def)
from .source_estimate import (read_source_estimate,
SourceEstimate, VectorSourceEstimate,
VolSourceEstimate, VolVectorSourceEstimate,
MixedSourceEstimate, MixedVectorSourceEstimate,
grade_to_tris,
spatial_src_adjacency,
spatial_tris_adjacency,
spatial_dist_adjacency,
spatial_inter_hemi_adjacency,
spatio_temporal_src_adjacency,
spatio_temporal_tris_adjacency,
spatio_temporal_dist_adjacency,
extract_label_time_course, stc_near_sensors)
from .surface import (read_surface, write_surface, decimate_surface, read_tri,
read_morph_map, get_head_surf, get_meg_helmet_surf,
dig_mri_distances)
from .morph import (SourceMorph, read_source_morph, grade_to_vertices,
compute_source_morph)
from .source_space import (read_source_spaces, vertex_to_mni,
head_to_mni, head_to_mri, read_talxfm,
write_source_spaces, setup_source_space,
setup_volume_source_space, SourceSpaces,
add_source_space_distances, morph_source_spaces,
get_volume_labels_from_aseg,
get_volume_labels_from_src, read_freesurfer_lut)
from .annotations import (Annotations, read_annotations, annotations_from_events,<|fim▁hole|> events_from_annotations)
from .epochs import (BaseEpochs, Epochs, EpochsArray, read_epochs,
concatenate_epochs, make_fixed_length_epochs)
from .evoked import Evoked, EvokedArray, read_evokeds, write_evokeds, combine_evoked
from .label import (read_label, label_sign_flip,
write_label, stc_to_label, grow_labels, Label, split_label,
BiHemiLabel, read_labels_from_annot, write_labels_to_annot,
random_parcellation, morph_labels, labels_to_stc)
from .misc import parse_config, read_reject_parameters
from .coreg import (create_default_subject, scale_bem, scale_mri, scale_labels,
scale_source_space)
from .transforms import (read_trans, write_trans,
transform_surface_to, Transform)
from .proj import (read_proj, write_proj, compute_proj_epochs,
compute_proj_evoked, compute_proj_raw, sensitivity_map)
from .dipole import read_dipole, Dipole, DipoleFixed, fit_dipole
from .channels import (equalize_channels, rename_channels, find_layout,
read_vectorview_selection)
from .report import Report, open_report
from .io import read_epochs_fieldtrip, read_evoked_fieldtrip, read_evokeds_mff
from .rank import compute_rank
from . import beamformer
from . import channels
from . import chpi
from . import commands
from . import connectivity
from . import coreg
from . import cuda
from . import datasets
from . import dipole
from . import epochs
from . import event
from . import externals
from . import io
from . import filter
from . import gui
from . import inverse_sparse
from . import minimum_norm
from . import preprocessing
from . import simulation
from . import stats
from . import surface
from . import time_frequency
from . import viz
from . import decoding
# deprecations
from .utils import deprecated_alias
deprecated_alias('read_selection', read_vectorview_selection)
# initialize logging
set_log_level(None, False)
set_log_file()<|fim▁end|> | |
<|file_name|>bzr.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, André Paramés <[email protected]>
# Based on the Git module by Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = u'''
---
module: bzr
author: "André Paramés (@andreparames)"
version_added: "1.1"
short_description: Deploy software (or files) from bzr branches
description:
- Manage I(bzr) branches to deploy files or software.
options:
name:
required: true
aliases: [ 'parent' ]
description:
- SSH or HTTP protocol address of the parent branch.
dest:
required: true
description:
- Absolute path of where the branch should be cloned to.
version:
required: false
default: "head"
description:
- What version of the branch to clone. This can be the
bzr revno or revid.
force:
required: false
default: "no"
choices: [ 'yes', 'no' ]
description:
- If C(yes), any modified files in the working
tree will be discarded. Before 1.9 the default
value was "yes".
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to bzr executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
'''
EXAMPLES = '''
# Example bzr checkout from Ansible Playbooks
- bzr:
name: 'bzr+ssh://foosball.example.org/path/to/branch'
dest: /srv/checkout
version: 22
'''
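# Illustrative only (not part of the module's documented examples): a checkout
# of head that discards any local modifications might look like:
# - bzr:
#     name: 'bzr+ssh://foosball.example.org/path/to/branch'
#     dest: /srv/checkout
#     force: yes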
import re
class Bzr(object):
def __init__(self, module, parent, dest, version, bzr_path):
self.module = module
self.parent = parent
self.dest = dest
self.version = version
self.bzr_path = bzr_path
def _command(self, args_list, cwd=None, **kwargs):
(rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
return (rc, out, err)
def get_version(self):
'''samples the version of the bzr branch'''
cmd = "%s revno" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
revno = stdout.strip()
return revno
def clone(self):
'''makes a new bzr branch if it does not already exist'''
dest_dirname = os.path.dirname(self.dest)
try:
os.makedirs(dest_dirname)
except:
pass
if self.version.lower() != 'head':
args_list = ["branch", "-r", self.version, self.parent, self.dest]
else:
args_list = ["branch", self.parent, self.dest]
return self._command(args_list, check_rc=True, cwd=dest_dirname)
def has_local_mods(self):
cmd = "%s status -S" % self.bzr_path
rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
lines = stdout.splitlines()
lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines)
return len(lines) > 0
def reset(self, force):
'''
Resets the index and working tree to head.
Discards any changes to tracked files in the working
tree since that commit.<|fim▁hole|> '''
if not force and self.has_local_mods():
self.module.fail_json(msg="Local modifications exist in branch (force=no).")
return self._command(["revert"], check_rc=True, cwd=self.dest)
def fetch(self):
'''updates branch from remote sources'''
if self.version.lower() != 'head':
(rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
else:
(rc, out, err) = self._command(["pull"], cwd=self.dest)
if rc != 0:
self.module.fail_json(msg="Failed to pull")
return (rc, out, err)
def switch_version(self):
'''once pulled, switch to a particular revno or revid'''
if self.version.lower() != 'head':
args_list = ["revert", "-r", self.version]
else:
args_list = ["revert"]
return self._command(args_list, check_rc=True, cwd=self.dest)
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
dest=dict(required=True, type='path'),
name=dict(required=True, aliases=['parent']),
version=dict(default='head'),
force=dict(default='no', type='bool'),
executable=dict(default=None),
)
)
dest = module.params['dest']
parent = module.params['name']
version = module.params['version']
force = module.params['force']
bzr_path = module.params['executable'] or module.get_bin_path('bzr', True)
bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf')
rc, out, err, status = (0, None, None, None)
bzr = Bzr(module, parent, dest, version, bzr_path)
# if there is no bzr configuration, do a branch operation
# else pull and switch the version
before = None
local_mods = False
if not os.path.exists(bzrconfig):
(rc, out, err) = bzr.clone()
else:
# else do a pull
local_mods = bzr.has_local_mods()
before = bzr.get_version()
(rc, out, err) = bzr.reset(force)
if rc != 0:
module.fail_json(msg=err)
(rc, out, err) = bzr.fetch()
if rc != 0:
module.fail_json(msg=err)
# switch to version specified regardless of whether
# we cloned or pulled
(rc, out, err) = bzr.switch_version()
# determine if we changed anything
after = bzr.get_version()
changed = False
if before != after or local_mods:
changed = True
module.exit_json(changed=changed, before=before, after=after)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()<|fim▁end|> | |
<|file_name|>bulk.py<|end_file_name|><|fim▁begin|># Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from six.moves.urllib.parse import quote, unquote
import tarfile
from xml.sax import saxutils
from time import time
from eventlet import sleep
import zlib
from swift.common.swob import Request, HTTPBadGateway, \
HTTPCreated, HTTPBadRequest, HTTPNotFound, HTTPUnauthorized, HTTPOk, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPNotAcceptable, \
HTTPLengthRequired, HTTPException, HTTPServerError, wsgify
from swift.common.utils import get_logger, register_swift_info
from swift.common import constraints
from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND, HTTP_CONFLICT
class CreateContainerError(Exception):
def __init__(self, msg, status_int, status):
self.status_int = status_int
self.status = status
super(CreateContainerError, self).__init__(msg)
ACCEPTABLE_FORMATS = ['text/plain', 'application/json', 'application/xml',
'text/xml']
def get_response_body(data_format, data_dict, error_list):
"""
    Returns a properly formatted response body according to format. Handles
    json and xml; otherwise returns text/plain. Note: the xml response does
    not include an xml declaration.
:params data_format: resulting format
:params data_dict: generated data about results.
:params error_list: list of quoted filenames that failed
"""
if data_format == 'application/json':
data_dict['Errors'] = error_list
return json.dumps(data_dict)
if data_format and data_format.endswith('/xml'):
output = '<delete>\n'
for key in sorted(data_dict):
xml_key = key.replace(' ', '_').lower()
output += '<%s>%s</%s>\n' % (xml_key, data_dict[key], xml_key)
output += '<errors>\n'
output += '\n'.join(
['<object>'
'<name>%s</name><status>%s</status>'
'</object>' % (saxutils.escape(name), status) for
name, status in error_list])
output += '</errors>\n</delete>\n'
return output
output = ''
for key in sorted(data_dict):
output += '%s: %s\n' % (key, data_dict[key])
output += 'Errors:\n'
output += '\n'.join(
['%s, %s' % (name, status)
for name, status in error_list])
return output
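# Illustrative sketch (not in the original module): given
#     data_dict = {'Response Status': '200 OK', 'Number Deleted': 2}
#     error_list = [('cont/obj1', '404 Not Found')]
# get_response_body('text/plain', data_dict, error_list) would produce roughly:
#     Number Deleted: 2
#     Response Status: 200 OK
#     Errors:
#     cont/obj1, 404 Not Found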
def pax_key_to_swift_header(pax_key):
if (pax_key == u"SCHILY.xattr.user.mime_type" or
pax_key == u"LIBARCHIVE.xattr.user.mime_type"):
return "Content-Type"
elif pax_key.startswith(u"SCHILY.xattr.user.meta."):
useful_part = pax_key[len(u"SCHILY.xattr.user.meta."):]
return "X-Object-Meta-" + useful_part.encode("utf-8")
elif pax_key.startswith(u"LIBARCHIVE.xattr.user.meta."):
useful_part = pax_key[len(u"LIBARCHIVE.xattr.user.meta."):]
return "X-Object-Meta-" + useful_part.encode("utf-8")
else:
# You can get things like atime/mtime/ctime or filesystem ACLs in
# pax headers; those aren't really user metadata. The same goes for
# other, non-user metadata.
return None
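# Illustrative mappings for the function above (hypothetical attribute names):
#   u"SCHILY.xattr.user.mime_type" -> "Content-Type"
#   u"SCHILY.xattr.user.meta.color" -> "X-Object-Meta-color"
#   u"SCHILY.xattr.user.stuff" -> None (ignored, not user metadata we expose)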
class Bulk(object):
"""
Middleware that will do many operations on a single request.
Extract Archive:
Expand tar files into a swift account. Request must be a PUT with the
query parameter ?extract-archive=format specifying the format of archive
file. Accepted formats are tar, tar.gz, and tar.bz2.
For a PUT to the following url:
/v1/AUTH_Account/$UPLOAD_PATH?extract-archive=tar.gz
UPLOAD_PATH is where the files will be expanded to. UPLOAD_PATH can be a
container, a pseudo-directory within a container, or an empty string. The
destination of a file in the archive will be built as follows:
/v1/AUTH_Account/$UPLOAD_PATH/$FILE_PATH
Where FILE_PATH is the file name from the listing in the tar file.
If the UPLOAD_PATH is an empty string, containers will be auto created
accordingly and files in the tar that would not map to any container (files
in the base directory) will be ignored.
Only regular files will be uploaded. Empty directories, symlinks, etc will
not be uploaded.
Content Type:
If the content-type header is set in the extract-archive call, Swift will
assign that content-type to all the underlying files. The bulk middleware
    will extract the archive file and send the internal files using PUT
    operations with the same headers as the original request
    (e.g. auth tokens, Content-Type, etc.). Notice that any middleware call
that follows the bulk middleware does not know if this was a bulk request
or if these were individual requests sent by the user.
In order to make Swift detect the content-type for the files based on the
file extension, the content-type in the extract-archive call should not be
set. Alternatively, it is possible to explicitly tell swift to detect the
content type using this header:
X-Detect-Content-Type:true
For example:
curl -X PUT http://127.0.0.1/v1/AUTH_acc/cont/$?extract-archive=tar -T
backup.tar -H "Content-Type: application/x-tar" -H "X-Auth-Token: xxx"
-H "X-Detect-Content-Type:true"
Assigning Metadata:
The tar file format (1) allows for UTF-8 key/value pairs to be associated
with each file in an archive. If a file has extended attributes, then tar
will store those as key/value pairs. The bulk middleware can read those
extended attributes and convert them to Swift object metadata. Attributes
starting with "user.meta" are converted to object metadata, and
"user.mime_type" is converted to Content-Type.
For example:
setfattr -n user.mime_type -v "application/python-setup" setup.py
setfattr -n user.meta.lunch -v "burger and fries" setup.py
setfattr -n user.meta.dinner -v "baked ziti" setup.py
setfattr -n user.stuff -v "whee" setup.py
Will get translated to headers:
Content-Type: application/python-setup
X-Object-Meta-Lunch: burger and fries
X-Object-Meta-Dinner: baked ziti
The bulk middleware will handle xattrs stored by both GNU and BSD tar (2).
Only xattrs user.mime_type and user.meta.* are processed. Other attributes
are ignored.
Notes:
(1) The POSIX 1003.1-2001 (pax) format. The default format on GNU tar
1.27.1 or later.
(2) Even with pax-format tarballs, different encoders store xattrs slightly
differently; for example, GNU tar stores the xattr "user.userattribute" as
pax header "SCHILY.xattr.user.userattribute", while BSD tar (which uses
libarchive) stores it as "LIBARCHIVE.xattr.user.userattribute".
Response:
The response from bulk operations functions differently from other swift
responses. This is because a short request body sent from the client could
result in many operations on the proxy server and precautions need to be
    taken to prevent the request from timing out due to lack of activity. To
this end, the client will always receive a 200 OK response, regardless of
the actual success of the call. The body of the response must be parsed to
determine the actual success of the operation. In addition to this the
client may receive zero or more whitespace characters prepended to the
actual response body while the proxy server is completing the request.
The format of the response body defaults to text/plain but can be either
json or xml depending on the Accept header. Acceptable formats are
text/plain, application/json, application/xml, and text/xml. An example
body is as follows:
{"Response Status": "201 Created",
"Response Body": "",
"Errors": [],
"Number Files Created": 10}
If all valid files were uploaded successfully the Response Status will be
201 Created. If any files failed to be created the response code
corresponds to the subrequest's error. Possible codes are 400, 401, 502 (on
server errors), etc. In both cases the response body will specify the
number of files successfully uploaded and a list of the files that failed.
There are proxy logs created for each file (which becomes a subrequest) in
the tar. The subrequest's proxy log will have a swift.source set to "EA"
the log's content length will reflect the unzipped size of the file. If
double proxy-logging is used the leftmost logger will not have a
swift.source set and the content length will reflect the size of the
payload sent to the proxy (the unexpanded size of the tar.gz).
Bulk Delete:
Will delete multiple objects or containers from their account with a
single request. Responds to POST requests with query parameter
?bulk-delete set. The request url is your storage url. The Content-Type
should be set to text/plain. The body of the POST request will be a
newline separated list of url encoded objects to delete. You can delete
10,000 (configurable) objects per request. The objects specified in the
POST request body must be URL encoded and in the form:
/container_name/obj_name
or for a container (which must be empty at time of delete)
/container_name
The response is similar to extract archive as in every response will be a
200 OK and you must parse the response body for actual results. An example
response is:
{"Number Not Found": 0,
"Response Status": "200 OK",
"Response Body": "",
"Errors": [],
"Number Deleted": 6}
If all items were successfully deleted (or did not exist), the Response
Status will be 200 OK. If any failed to delete, the response code
corresponds to the subrequest's error. Possible codes are 400, 401, 502 (on
server errors), etc. In all cases the response body will specify the number
of items successfully deleted, not found, and a list of those that failed.
The return body will be formatted in the way specified in the request's
Accept header. Acceptable formats are text/plain, application/json,
application/xml, and text/xml.
There are proxy logs created for each object or container (which becomes a
subrequest) that is deleted. The subrequest's proxy log will have a
swift.source set to "BD" the log's content length of 0. If double
proxy-logging is used the leftmost logger will not have a
swift.source set and the content length will reflect the size of the
payload sent to the proxy (the list of objects/containers to be deleted).
"""
def __init__(self, app, conf, max_containers_per_extraction=10000,
max_failed_extractions=1000, max_deletes_per_request=10000,
max_failed_deletes=1000, yield_frequency=10, retry_count=0,
retry_interval=1.5, logger=None):
self.app = app
self.logger = logger or get_logger(conf, log_route='bulk')
self.max_containers = max_containers_per_extraction
self.max_failed_extractions = max_failed_extractions
self.max_failed_deletes = max_failed_deletes
self.max_deletes_per_request = max_deletes_per_request
self.yield_frequency = yield_frequency
self.retry_count = retry_count
self.retry_interval = retry_interval
self.max_path_length = constraints.MAX_OBJECT_NAME_LENGTH \
+ constraints.MAX_CONTAINER_NAME_LENGTH + 2
def create_container(self, req, container_path):
"""
Checks if the container exists and if not try to create it.
:params container_path: an unquoted path to a container to be created
:returns: True if created container, False if container exists
:raises: CreateContainerError when unable to create container
"""
new_env = req.environ.copy()
new_env['PATH_INFO'] = container_path
new_env['swift.source'] = 'EA'
new_env['REQUEST_METHOD'] = 'HEAD'
head_cont_req = Request.blank(container_path, environ=new_env)
resp = head_cont_req.get_response(self.app)
if resp.is_success:
return False
if resp.status_int == 404:
new_env = req.environ.copy()
new_env['PATH_INFO'] = container_path
new_env['swift.source'] = 'EA'
new_env['REQUEST_METHOD'] = 'PUT'
create_cont_req = Request.blank(container_path, environ=new_env)
resp = create_cont_req.get_response(self.app)
if resp.is_success:
return True
raise CreateContainerError(
"Create Container Failed: " + container_path,
resp.status_int, resp.status)
def get_objs_to_delete(self, req):
"""
Will populate objs_to_delete with data from request input.
:params req: a Swob request
:returns: a list of the contents of req.body when separated by newline.
:raises: HTTPException on failures
"""
line = ''
data_remaining = True
objs_to_delete = []
if req.content_length is None and \
req.headers.get('transfer-encoding', '').lower() != 'chunked':
raise HTTPLengthRequired(request=req)
while data_remaining:
if '\n' in line:
obj_to_delete, line = line.split('\n', 1)
obj_to_delete = obj_to_delete.strip()
objs_to_delete.append(
{'name': unquote(obj_to_delete)})
else:
data = req.body_file.read(self.max_path_length)
if data:
line += data
else:
data_remaining = False
obj_to_delete = line.strip()
if obj_to_delete:
objs_to_delete.append(
{'name': unquote(obj_to_delete)})
if len(objs_to_delete) > self.max_deletes_per_request:
raise HTTPRequestEntityTooLarge(
'Maximum Bulk Deletes: %d per request' %
self.max_deletes_per_request)
if len(line) > self.max_path_length * 2:
raise HTTPBadRequest('Invalid File Name')
return objs_to_delete
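    # Illustrative sketch (assumed input): a request body such as
    #     "cont/obj%20one\ncont/obj_two\ncont\n"
    # would be parsed by get_objs_to_delete into
    #     [{'name': 'cont/obj one'}, {'name': 'cont/obj_two'}, {'name': 'cont'}]
    # i.e. names are URL-decoded and the count is capped by
    # max_deletes_per_request.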
def handle_delete_iter(self, req, objs_to_delete=None,
user_agent='BulkDelete', swift_source='BD',
out_content_type='text/plain'):
"""
A generator that can be assigned to a swob Response's app_iter which,
when iterated over, will delete the objects specified in request body.
Will occasionally yield whitespace while request is being processed.
When the request is completed will yield a response body that can be
parsed to determine success. See above documentation for details.
:params req: a swob Request
:params objs_to_delete: a list of dictionaries that specifies the
objects to be deleted. If None, uses self.get_objs_to_delete to
query request.
"""
last_yield = time()
separator = ''
failed_files = []
resp_dict = {'Response Status': HTTPOk().status,
'Response Body': '',
'Number Deleted': 0,
'Number Not Found': 0}
try:
if not out_content_type:
raise HTTPNotAcceptable(request=req)
if out_content_type.endswith('/xml'):
yield '<?xml version="1.0" encoding="UTF-8"?>\n'
try:
vrs, account, _junk = req.split_path(2, 3, True)
except ValueError:
raise HTTPNotFound(request=req)
incoming_format = req.headers.get('Content-Type')
if incoming_format and \
not incoming_format.startswith('text/plain'):
# For now only accept newline separated object names
raise HTTPNotAcceptable(request=req)
if objs_to_delete is None:
objs_to_delete = self.get_objs_to_delete(req)
failed_file_response = {'type': HTTPBadRequest}
req.environ['eventlet.minimum_write_chunk_size'] = 0
for obj_to_delete in objs_to_delete:
if last_yield + self.yield_frequency < time():
separator = '\r\n\r\n'
last_yield = time()
yield ' '
obj_name = obj_to_delete['name']
if not obj_name:
continue
if len(failed_files) >= self.max_failed_deletes:
raise HTTPBadRequest('Max delete failures exceeded')
if obj_to_delete.get('error'):
if obj_to_delete['error']['code'] == HTTP_NOT_FOUND:
resp_dict['Number Not Found'] += 1
else:
failed_files.append([quote(obj_name),
obj_to_delete['error']['message']])
continue
delete_path = '/'.join(['', vrs, account,
obj_name.lstrip('/')])
if not constraints.check_utf8(delete_path):
failed_files.append([quote(obj_name),
HTTPPreconditionFailed().status])
continue
new_env = req.environ.copy()
new_env['PATH_INFO'] = delete_path
del(new_env['wsgi.input'])
new_env['CONTENT_LENGTH'] = 0
new_env['REQUEST_METHOD'] = 'DELETE'
new_env['HTTP_USER_AGENT'] = \
'%s %s' % (req.environ.get('HTTP_USER_AGENT'), user_agent)
new_env['swift.source'] = swift_source
self._process_delete(delete_path, obj_name, new_env, resp_dict,
failed_files, failed_file_response)
if failed_files:
resp_dict['Response Status'] = \
failed_file_response['type']().status
elif not (resp_dict['Number Deleted'] or
resp_dict['Number Not Found']):
resp_dict['Response Status'] = HTTPBadRequest().status
resp_dict['Response Body'] = 'Invalid bulk delete.'
except HTTPException as err:
resp_dict['Response Status'] = err.status
resp_dict['Response Body'] = err.body
except Exception:
self.logger.exception('Error in bulk delete.')
resp_dict['Response Status'] = HTTPServerError().status
yield separator + get_response_body(out_content_type,
resp_dict, failed_files)
def handle_extract_iter(self, req, compress_type,
out_content_type='text/plain'):
"""
A generator that can be assigned to a swob Response's app_iter which,
when iterated over, will extract and PUT the objects pulled from the
request body. Will occasionally yield whitespace while request is being
processed. When the request is completed will yield a response body
that can be parsed to determine success. See above documentation for
details.
:params req: a swob Request
:params compress_type: specifying the compression type of the tar.
Accepts '', 'gz', or 'bz2'
"""
resp_dict = {'Response Status': HTTPCreated().status,
'Response Body': '', 'Number Files Created': 0}
failed_files = []
last_yield = time()
separator = ''
containers_accessed = set()
try:
if not out_content_type:
raise HTTPNotAcceptable(request=req)
if out_content_type.endswith('/xml'):
yield '<?xml version="1.0" encoding="UTF-8"?>\n'
if req.content_length is None and \
req.headers.get('transfer-encoding',
'').lower() != 'chunked':
raise HTTPLengthRequired(request=req)
try:
vrs, account, extract_base = req.split_path(2, 3, True)
except ValueError:
raise HTTPNotFound(request=req)
extract_base = extract_base or ''
extract_base = extract_base.rstrip('/')
tar = tarfile.open(mode='r|' + compress_type,
fileobj=req.body_file)
failed_response_type = HTTPBadRequest
req.environ['eventlet.minimum_write_chunk_size'] = 0
containers_created = 0
while True:
if last_yield + self.yield_frequency < time():
separator = '\r\n\r\n'
last_yield = time()
yield ' '
tar_info = next(tar)
if tar_info is None or \
len(failed_files) >= self.max_failed_extractions:
break
if tar_info.isfile():
obj_path = tar_info.name
if obj_path.startswith('./'):
obj_path = obj_path[2:]
obj_path = obj_path.lstrip('/')
if extract_base:
obj_path = extract_base + '/' + obj_path
if '/' not in obj_path:
continue # ignore base level file
destination = '/'.join(
['', vrs, account, obj_path])
container = obj_path.split('/', 1)[0]
if not constraints.check_utf8(destination):
failed_files.append(
[quote(obj_path[:self.max_path_length]),
HTTPPreconditionFailed().status])
continue
if tar_info.size > constraints.MAX_FILE_SIZE:
failed_files.append([
quote(obj_path[:self.max_path_length]),
HTTPRequestEntityTooLarge().status])
continue
container_failure = None
if container not in containers_accessed:
cont_path = '/'.join(['', vrs, account, container])
try:
if self.create_container(req, cont_path):
containers_created += 1
if containers_created > self.max_containers:
raise HTTPBadRequest(
'More than %d containers to create '
'from tar.' % self.max_containers)
except CreateContainerError as err:
# the object PUT to this container still may
# succeed if acls are set
container_failure = [
quote(cont_path[:self.max_path_length]),
err.status]
if err.status_int == HTTP_UNAUTHORIZED:
raise HTTPUnauthorized(request=req)
except ValueError:
failed_files.append([
quote(obj_path[:self.max_path_length]),
HTTPBadRequest().status])
continue
tar_file = tar.extractfile(tar_info)
new_env = req.environ.copy()
new_env['REQUEST_METHOD'] = 'PUT'
new_env['wsgi.input'] = tar_file
new_env['PATH_INFO'] = destination
new_env['CONTENT_LENGTH'] = tar_info.size
new_env['swift.source'] = 'EA'
new_env['HTTP_USER_AGENT'] = \
'%s BulkExpand' % req.environ.get('HTTP_USER_AGENT')
create_obj_req = Request.blank(destination, new_env)
for pax_key, pax_value in tar_info.pax_headers.items():
header_name = pax_key_to_swift_header(pax_key)
if header_name:
# Both pax_key and pax_value are unicode
# strings; the key is already UTF-8 encoded, but
# we still have to encode the value.
create_obj_req.headers[header_name] = \
pax_value.encode("utf-8")
resp = create_obj_req.get_response(self.app)
containers_accessed.add(container)
if resp.is_success:
resp_dict['Number Files Created'] += 1
else:
if container_failure:
failed_files.append(container_failure)
if resp.status_int == HTTP_UNAUTHORIZED:
failed_files.append([
quote(obj_path[:self.max_path_length]),
HTTPUnauthorized().status])
raise HTTPUnauthorized(request=req)
if resp.status_int // 100 == 5:
failed_response_type = HTTPBadGateway
failed_files.append([
quote(obj_path[:self.max_path_length]),
resp.status])
if failed_files:
resp_dict['Response Status'] = failed_response_type().status
elif not resp_dict['Number Files Created']:
resp_dict['Response Status'] = HTTPBadRequest().status
resp_dict['Response Body'] = 'Invalid Tar File: No Valid Files'
except HTTPException as err:
resp_dict['Response Status'] = err.status
resp_dict['Response Body'] = err.body
except (tarfile.TarError, zlib.error) as tar_error:
resp_dict['Response Status'] = HTTPBadRequest().status
resp_dict['Response Body'] = 'Invalid Tar File: %s' % tar_error
except Exception:
self.logger.exception('Error in extract archive.')
resp_dict['Response Status'] = HTTPServerError().status
yield separator + get_response_body(
out_content_type, resp_dict, failed_files)
def _process_delete(self, delete_path, obj_name, env, resp_dict,
failed_files, failed_file_response, retry=0):
delete_obj_req = Request.blank(delete_path, env)
resp = delete_obj_req.get_response(self.app)
if resp.status_int // 100 == 2:
resp_dict['Number Deleted'] += 1
elif resp.status_int == HTTP_NOT_FOUND:
resp_dict['Number Not Found'] += 1
elif resp.status_int == HTTP_UNAUTHORIZED:
failed_files.append([quote(obj_name),
HTTPUnauthorized().status])
elif resp.status_int == HTTP_CONFLICT and \
self.retry_count > 0 and self.retry_count > retry:
retry += 1
sleep(self.retry_interval ** retry)
self._process_delete(delete_path, obj_name, env, resp_dict,
failed_files, failed_file_response,
retry)
else:
if resp.status_int // 100 == 5:
failed_file_response['type'] = HTTPBadGateway
failed_files.append([quote(obj_name), resp.status])
@wsgify
def __call__(self, req):
extract_type = req.params.get('extract-archive')
resp = None
if extract_type is not None and req.method == 'PUT':
archive_type = {
'tar': '', 'tar.gz': 'gz',
'tar.bz2': 'bz2'}.get(extract_type.lower().strip('.'))
if archive_type is not None:
resp = HTTPOk(request=req)
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
if out_content_type:
resp.content_type = out_content_type
resp.app_iter = self.handle_extract_iter(
req, archive_type, out_content_type=out_content_type)
else:
resp = HTTPBadRequest("Unsupported archive format")
if 'bulk-delete' in req.params and req.method in ['POST', 'DELETE']:
resp = HTTPOk(request=req)
out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
if out_content_type:
resp.content_type = out_content_type
resp.app_iter = self.handle_delete_iter(
req, out_content_type=out_content_type)
return resp or self.app
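# Illustrative deployment snippet (assumed values): the middleware is normally
# enabled from the proxy pipeline configuration, e.g.
#     [filter:bulk]
#     use = egg:swift#bulk
#     max_deletes_per_request = 10000
#     max_failed_extractions = 1000
#     yield_frequency = 10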
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
max_containers_per_extraction = \
int(conf.get('max_containers_per_extraction', 10000))
max_failed_extractions = int(conf.get('max_failed_extractions', 1000))
max_deletes_per_request = int(conf.get('max_deletes_per_request', 10000))<|fim▁hole|> retry_count = int(conf.get('delete_container_retry_count', 0))
retry_interval = 1.5
register_swift_info(
'bulk_upload',
max_containers_per_extraction=max_containers_per_extraction,
max_failed_extractions=max_failed_extractions)
register_swift_info(
'bulk_delete',
max_deletes_per_request=max_deletes_per_request,
max_failed_deletes=max_failed_deletes)
def bulk_filter(app):
return Bulk(
app, conf,
max_containers_per_extraction=max_containers_per_extraction,
max_failed_extractions=max_failed_extractions,
max_deletes_per_request=max_deletes_per_request,
max_failed_deletes=max_failed_deletes,
yield_frequency=yield_frequency,
retry_count=retry_count,
retry_interval=retry_interval)
return bulk_filter<|fim▁end|> | max_failed_deletes = int(conf.get('max_failed_deletes', 1000))
yield_frequency = int(conf.get('yield_frequency', 10)) |
<|file_name|>cmd_job_cancel.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""
bkr job-cancel: Cancel running Beaker jobs
==========================================
.. program:: bkr job-cancel
Synopsis
--------
:program:`bkr job-cancel` [--msg <message>] [*options*] <taskspec>...
Description
-----------
Specify one or more <taskspec> arguments to be cancelled.
The <taskspec> arguments follow the same format as in other :program:`bkr`
subcommands (for example, ``J:1234``). See :ref:`Specifying tasks <taskspec>`
in :manpage:`bkr(1)`.
Only jobs and recipe sets may be cancelled. It does not make sense to cancel
individual recipes within a recipe set, or tasks within a recipe, so Beaker
does not permit this.
Options
-------
.. option:: --msg <message>
Optionally you can provide a message describing the reason for the
cancellation. This message will be recorded against all outstanding tasks in
the cancelled recipe set, and will be visible in the Beaker web UI.
Common :program:`bkr` options are described in the :ref:`Options
<common-options>` section of :manpage:`bkr(1)`.
Exit status
-----------
Non-zero on error, otherwise zero.
Examples
--------
Cancel job 1234 with a helpful message::
bkr job-cancel --msg "Selected wrong distro, resubmitting job" J:1234
See also
--------
:manpage:`bkr(1)`
"""
from __future__ import print_function
from bkr.client import BeakerCommand
class Job_Cancel(BeakerCommand):
"""
Cancel Jobs/Recipes
"""
enabled = True
def options(self):
self.parser.add_option(
"--msg",
default=None,
help="Optional message to record as to why you cancelled",
)
self.parser.usage = "%%prog %s [options] [J:<id> | RS:<id> ...]" % self.normalized_name
def run(self, *args, **kwargs):
if len(args) < 1:
self.parser.error('Please specify a taskspec to cancel')
self.check_taskspec_args(args, permitted_types=['J', 'RS', 'T'])
msg = kwargs.pop("msg", None)
self.set_hub(**kwargs)
for task in args:<|fim▁hole|> self.hub.taskactions.stop(task, 'cancel', msg)
print('Cancelled %s' % task)<|fim▁end|> | |
<|file_name|>bin.rs<|end_file_name|><|fim▁begin|>#[macro_use]
extern crate malachite_base_test_util;
extern crate malachite_nz;
extern crate malachite_nz_test_util;
extern crate serde;
extern crate serde_json;
use demo_and_bench::register;
use generate::digits_data::generate_string_data;
use malachite_base_test_util::runner::cmd::read_command_line_arguments;
use malachite_base_test_util::runner::Runner;
// Examples:
//
// cargo run -- -l 100000 -m special_random -d demo_natural_from_unsigned_u128 -c
// "mean_run_length_n 4 mean_run_length_d 1"
//
// cargo run --release -- -l 100000 -m random -b benchmark_limbs_to_digits_small_base_algorithms<|fim▁hole|>fn main() {
let args = read_command_line_arguments("malachite-nz test utils");
let mut runner = Runner::new();
register(&mut runner);
if let Some(demo_key) = args.demo_key {
runner.run_demo(&demo_key, args.generation_mode, args.config, args.limit);
} else if let Some(bench_key) = args.bench_key {
runner.run_bench(
&bench_key,
args.generation_mode,
args.config,
args.limit,
&args.out,
);
} else {
let codegen_key = args.codegen_key.unwrap();
match codegen_key.as_str() {
"digits_data" => generate_string_data(),
_ => panic!("Invalid codegen key: {}", codegen_key),
}
}
}
mod demo_and_bench;
mod generate;<|fim▁end|> | //
// cargo run -- -g digits_data |
<|file_name|>ephyview.py<|end_file_name|><|fim▁begin|>import os
import numpy as np
import tables
import galry.pyplot as plt
from galry import Visual, process_coordinates, get_next_color, get_color
from qtools import inthread
MAXSIZE = 5000
CHANNEL_HEIGHT = .25
class MultiChannelVisual(Visual):
def initialize(self, x=None, y=None, color=None, point_size=1.0,
position=None, nprimitives=None, index=None,
color_array_index=None, channel_height=CHANNEL_HEIGHT,
options=None, autocolor=None):
position, shape = process_coordinates(x=x, y=y)
# register the size of the data
self.size = np.prod(shape)
# there is one plot per row
if not nprimitives:
nprimitives = shape[0]<|fim▁hole|> nsamples = self.size // nprimitives
# register the bounds
if nsamples <= 1:
self.bounds = [0, self.size]
else:
self.bounds = np.arange(0, self.size + 1, nsamples)
# automatic color with color map
if autocolor is not None:
if nprimitives <= 1:
color = get_next_color(autocolor)
else:
color = np.array([get_next_color(i + autocolor) for i in xrange(nprimitives)])
# set position attribute
self.add_attribute("position0", ndim=2, data=position, autonormalizable=True)
index = np.array(index)
self.add_index("index", data=index)
if color_array_index is None:
color_array_index = np.repeat(np.arange(nprimitives), nsamples)
color_array_index = np.array(color_array_index)
ncolors = color.shape[0]
ncomponents = color.shape[1]
color = color.reshape((1, ncolors, ncomponents))
dx = 1. / ncolors
offset = dx / 2.
self.add_texture('colormap', ncomponents=ncomponents, ndim=1, data=color)
self.add_attribute('index', ndim=1, vartype='int', data=color_array_index)
self.add_varying('vindex', vartype='int', ndim=1)
self.add_uniform('nchannels', vartype='float', ndim=1, data=float(nprimitives))
self.add_uniform('channel_height', vartype='float', ndim=1, data=channel_height)
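        # The vertex shader below scales each sample by channel_height and then
        # offsets it vertically so that channel i occupies its own band; the
        # offsets are spread evenly over roughly [-0.9, 0.9] across nchannels.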
self.add_vertex_main("""
vec2 position = position0;
position.y = channel_height * position.y + .9 * (2 * index - (nchannels - 1)) / (nchannels - 1);
vindex = index;
""")
self.add_fragment_main("""
float coord = %.5f + vindex * %.5f;
vec4 color = texture1D(colormap, coord);
out_color = color;
""" % (offset, dx))
# add point size uniform (when it's not specified, there might be some
# bugs where its value is obtained from other datasets...)
self.add_uniform("point_size", data=point_size)
self.add_vertex_main("""gl_PointSize = point_size;""")
def get_view(total_size, xlim, freq):
"""Return the slice of the data.
Arguments:
* xlim: (x0, x1) of the window currently displayed.
"""
# Viewport.
x0, x1 = xlim
d = x1 - x0
dmax = duration
zoom = max(dmax / d, 1)
view_size = total_size / zoom
step = int(np.ceil(view_size / MAXSIZE))
# Extended viewport for data.
x0ex = np.clip(x0 - 3 * d, 0, dmax)
x1ex = np.clip(x1 + 3 * d, 0, dmax)
i0 = np.clip(int(np.round(x0ex * freq)), 0, total_size)
i1 = np.clip(int(np.round(x1ex * freq)), 0, total_size)
return (x0ex, x1ex), slice(i0, i1, step)
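# Rough worked example (assumed numbers): with duration ~60 s and a visible
# window xlim=(10.0, 12.0), zoom = 30, so one window holds total_size / 30
# samples; step is picked so that a single window contributes at most MAXSIZE
# samples per channel, and the returned slice spans an extended window with
# three extra window-widths on each side so small pans need no reload.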
def get_undersampled_data(data, xlim, slice):
"""
Arguments:
* data: a HDF5 dataset of size Nsamples x Nchannels.
* xlim: (x0, x1) of the current data view.
"""
# total_size = data.shape[0]
# Get the view slice.
# x0ex, x1ex = xlim
# x0d, x1d = x0ex / (duration_initial) * 2 - 1, x1ex / (duration_initial) * 2 - 1
# Extract the samples from the data (HDD access).
samples = data[slice, :]
# Convert the data into floating points.
samples = np.array(samples, dtype=np.float32)
# Normalize the data.
samples *= (1. / 65535)
# samples *= .25
# Size of the slice.
nsamples, nchannels = samples.shape
# Create the data array for the plot visual.
M = np.empty((nsamples * nchannels, 2))
samples = samples.T# + np.linspace(-1., 1., nchannels).reshape((-1, 1))
M[:, 1] = samples.ravel()
# Generate the x coordinates.
x = np.arange(slice.start, slice.stop, slice.step) / float(total_size - 1)
# [0, 1] -> [-1, 2*duration.duration_initial - 1]
x = x * 2 * duration / duration_initial - 1
M[:, 0] = np.tile(x, nchannels)
# Update the bounds.
bounds = np.arange(nchannels + 1) * nsamples
size = bounds[-1]
return M, bounds, size
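# Descriptive note: M stacks the channels end-to-end, so rows
# [k * nsamples, (k + 1) * nsamples) hold channel k, with column 0 the time
# axis remapped to the GL x range and column 1 the normalized amplitude;
# `bounds` records where each channel's samples start and stop.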
@inthread
class DataUpdater(object):
info = {}
def update(self, data, xlimex, slice):
samples, bounds, size = get_undersampled_data(data, xlimex, slice)
nsamples = samples.shape[0]
color_array_index = np.repeat(np.arange(nchannels), nsamples / nchannels)
self.info = dict(position0=samples, bounds=bounds, size=size,
index=color_array_index)
dir = os.path.dirname(os.path.abspath(__file__))
try:
filename = r"test_data/n6mab031109.h5"
f = tables.openFile(os.path.join(dir, filename))
except:
filename = r"test_data/n6mab031109.trim.h5"
f = tables.openFile(os.path.join(dir, filename))
try:
data = f.root.RawData
except:
data = f.root.raw_data
nsamples, nchannels = data.shape
total_size = nsamples
freq = 20000.
dt = 1. / freq
duration = (data.shape[0] - 1) * dt
duration_initial = 5.
x = np.tile(np.linspace(0., duration, nsamples // MAXSIZE), (nchannels, 1))
y = np.zeros_like(x)+ np.linspace(-.9, .9, nchannels).reshape((-1, 1))
plt.figure(toolbar=False, show_grid=True)
plt.visual(MultiChannelVisual, x=x, y=y)
updater = DataUpdater(impatient=True)
SLICE = None
def change_channel_height(figure, parameter):
global CHANNEL_HEIGHT
CHANNEL_HEIGHT *= (1 + parameter)
figure.set_data(channel_height=CHANNEL_HEIGHT)
def pan(figure, parameter):
figure.process_interaction('Pan', parameter)
def anim(figure, parameter):
# Constrain the zoom.
nav = figure.get_processor('navigation')
nav.constrain_navigation = True
nav.xmin = -1
nav.xmax = 2 * duration / duration_initial
nav.sxmin = 1.
zoom = nav.sx
box = nav.get_viewbox()
xlim = ((box[0] + 1) / 2. * (duration_initial), (box[2] + 1) / 2. * (duration_initial))
xlimex, slice = get_view(data.shape[0], xlim, freq)
# Paging system.
dur = xlim[1] - xlim[0]
index = int(np.floor(xlim[0] / dur))
zoom_index = int(np.round(duration_initial / dur))
i = (index, zoom_index)
global SLICE
if i != SLICE:
SLICE = i
updater.update(data, xlimex, slice)
if updater.info:
figure.set_data(**updater.info)
updater.info.clear()
plt.animate(anim, dt=.01)
plt.action('Wheel', change_channel_height, key_modifier='Control',
param_getter=lambda p: p['wheel'] * .001)
plt.action('Wheel', pan, key_modifier='Shift',
param_getter=lambda p: (p['wheel'] * .002, 0))
plt.action('DoubleClick', 'ResetZoom')
plt.xlim(0., duration_initial)
plt.show()
f.close()<|fim▁end|> | nsamples = shape[1]
else: |
<|file_name|>Image.cpp<|end_file_name|><|fim▁begin|>#include "Image.h"
// to avoid compiler confusion, python.hpp must be include before Halide headers
#include <boost/format.hpp>
#include <boost/python.hpp>
#define USE_NUMPY
#ifdef USE_NUMPY
#ifdef USE_BOOST_NUMPY
#include <boost/numpy.hpp>
#else
// we use Halide::numpy
#include "../numpy/numpy.hpp"
#endif
#endif // USE_NUMPY
#include <boost/cstdint.hpp>
#include <boost/functional/hash/hash.hpp>
#include <boost/mpl/list.hpp>
#include "../../src/runtime/HalideBuffer.h"
#include "Func.h"
#include "Type.h"
#include <functional>
#include <string>
#include <unordered_map>
#include <vector>
namespace h = Halide;
namespace p = boost::python;
#ifdef USE_NUMPY
#ifdef USE_BOOST_NUMPY
namespace bn = boost::numpy;
#else
namespace bn = Halide::numpy;
#endif
#endif // USE_NUMPY
template <typename Ret, typename T, typename... Args>
Ret buffer_call_operator(h::Buffer<T> &that, Args... args) {
return that(args...);
}
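// Descriptive note: the tuple overload below extracts an Expr from each
// element of the Python tuple and forwards the resulting std::vector<Expr>
// to the buffer's call operator, so buf[(x, y, c)]-style indexing from
// Python builds the same load expression as the fixed-arity overloads.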
template <typename T>
h::Expr buffer_call_operator_tuple(h::Buffer<T> &that, p::tuple &args_passed) {
std::vector<h::Expr> expr_args;
for (ssize_t i = 0; i < p::len(args_passed); i++) {
expr_args.push_back(p::extract<h::Expr>(args_passed[i]));
}
return that(expr_args);
}
template <typename T>
T buffer_to_setitem_operator0(h::Buffer<T> &that, int x, T value) {
return that(x) = value;
}
template <typename T>
T buffer_to_setitem_operator1(h::Buffer<T> &that, int x, int y, T value) {
return that(x, y) = value;
}
template <typename T>
T buffer_to_setitem_operator2(h::Buffer<T> &that, int x, int y, int z, T value) {
return that(x, y, z) = value;
}
template <typename T>
T buffer_to_setitem_operator3(h::Buffer<T> &that, int x, int y, int z, int w, T value) {
return that(x, y, z, w) = value;
}
template <typename T>
T buffer_to_setitem_operator4(h::Buffer<T> &that, p::tuple &args_passed, T value) {
std::vector<int> int_args;
const size_t args_len = p::len(args_passed);
for (size_t i = 0; i < args_len; i += 1) {
p::object o = args_passed[i];
p::extract<int> int32_extract(o);
if (int32_extract.check()) {
int_args.push_back(int32_extract());
}
}
if (int_args.size() != args_len) {
for (size_t j = 0; j < args_len; j += 1) {
p::object o = args_passed[j];
const std::string o_str = p::extract<std::string>(p::str(o));
printf("buffer_to_setitem_operator4 args_passed[%lu] == %s\n", j, o_str.c_str());
}
throw std::invalid_argument("buffer_to_setitem_operator4 only handles "
"a tuple of (convertible to) int.");
}
switch (int_args.size()) {
case 1:
return that(int_args[0]) = value;
case 2:
return that(int_args[0], int_args[1]) = value;
case 3:
return that(int_args[0], int_args[1], int_args[2]) = value;
case 4:
return that(int_args[0], int_args[1], int_args[2], int_args[3]) = value;
default:
printf("buffer_to_setitem_operator4 receive a tuple with %zu integers\n", int_args.size());
throw std::invalid_argument("buffer_to_setitem_operator4 only handles 1 to 4 dimensional tuples");
}
return 0; // this line should never be reached
}
template <typename T>
const T *buffer_data(const h::Buffer<T> &buffer) {
return buffer.data();
}
template <typename T>
void buffer_set_min1(h::Buffer<T> &im, int m0) {
im.set_min(m0);
}
template <typename T>
void buffer_set_min2(h::Buffer<T> &im, int m0, int m1) {
im.set_min(m0, m1);
}
template <typename T>
void buffer_set_min3(h::Buffer<T> &im, int m0, int m1, int m2) {
im.set_min(m0, m1, m2);
}
template <typename T>
void buffer_set_min4(h::Buffer<T> &im, int m0, int m1, int m2, int m3) {
im.set_min(m0, m1, m2, m3);
}
template <typename T>
std::string buffer_repr(const h::Buffer<T> &buffer) {
std::string repr;
h::Type t = halide_type_of<T>();
std::string suffix = "_???";
if (t.is_float()) {
suffix = "_float";
} else if (t.is_int()) {
suffix = "_int";
} else if (t.is_uint()) {
suffix = "_uint";
} else if (t.is_bool()) {
suffix = "_bool";
} else if (t.is_handle()) {
suffix = "_handle";
}
boost::format f("<halide.Buffer%s%i; element_size %i bytes; "
"extent (%i %i %i %i); min (%i %i %i %i); stride (%i %i %i %i)>");
repr = boost::str(f % suffix % t.bits() % t.bytes() % buffer.extent(0) % buffer.extent(1) % buffer.extent(2) % buffer.extent(3) % buffer.min(0) % buffer.min(1) % buffer.min(2) % buffer.min(3) % buffer.stride(0) % buffer.stride(1) % buffer.stride(2) % buffer.stride(3));
return repr;
}
template <typename T>
boost::python::object get_type_function_wrapper() {
std::function<h::Type(h::Buffer<T> &)> return_type_func =
[&](h::Buffer<T> &that) -> h::Type { return halide_type_of<T>(); };
auto call_policies = p::default_call_policies();
typedef boost::mpl::vector<h::Type, h::Buffer<T> &> func_sig;
return p::make_function(return_type_func, call_policies, p::arg("self"), func_sig());
}
template <typename T>
void buffer_copy_to_host(h::Buffer<T> &im) {
im.copy_to_host();
}
template <typename T>
void defineBuffer_impl(const std::string suffix, const h::Type type) {
using h::Buffer;
using h::Expr;
auto buffer_class =
p::class_<Buffer<T>>(
("Buffer" + suffix).c_str(),
"A reference-counted handle on a dense multidimensional array "
"containing scalar values of type T. Can be directly accessed and "
"modified. May have up to four dimensions. Color images are "
"represented as three-dimensional, with the third dimension being "
"the color channel. In general we store color images in "
"color-planes, as opposed to packed RGB, because this tends to "
"vectorize more cleanly.",
p::init<>(p::arg("self"), "Construct an undefined buffer handle"));
// Constructors
buffer_class
.def(p::init<int>(
p::args("self", "x"),
"Allocate an buffer with the given dimensions."))
.def(p::init<int, int>(
p::args("self", "x", "y"),
"Allocate an buffer with the given dimensions."))
.def(p::init<int, int, int>(
p::args("self", "x", "y", "z"),
"Allocate an buffer with the given dimensions."))
.def(p::init<int, int, int, int>(
p::args("self", "x", "y", "z", "w"),
"Allocate an buffer with the given dimensions."))
.def(p::init<h::Realization &>(
p::args("self", "r"),
"Wrap a single-element realization in an Buffer object."))
.def(p::init<buffer_t>(
p::args("self", "b"),
"Wrap a buffer_t in an Buffer object, so that we can access its pixels."));
buffer_class
.def("__repr__", &buffer_repr<T>, p::arg("self"));
buffer_class
.def("data", &buffer_data<T>, p::arg("self"),
p::return_value_policy<p::return_opaque_pointer>(), // not sure this will do what we want
"Get a pointer to the element at the min location.")
.def("copy_to_host", &buffer_copy_to_host<T>, p::arg("self"),
"Manually copy-back data to the host, if it's on a device. ")
.def("set_host_dirty", &Buffer<T>::set_host_dirty,
(p::arg("self"), p::arg("dirty") = true),
"Mark the buffer as dirty-on-host. ")
.def("type", get_type_function_wrapper<T>(),
"Return Type instance for the data type of the buffer.")
.def("channels", &Buffer<T>::channels, p::arg("self"),
"Get the extent of dimension 2, which by convention we use as"
"the number of color channels (often 3). Unlike extent(2), "
"returns one if the buffer has fewer than three dimensions.")
.def("dimensions", &Buffer<T>::dimensions, p::arg("self"),
"Get the dimensionality of the data. Typically two for grayscale images, and three for color images.")
.def("stride", &Buffer<T>::stride, p::args("self", "dim"),
"Get the number of elements in the buffer between two adjacent "
"elements in the given dimension. For example, the stride in "
"dimension 0 is usually 1, and the stride in dimension 1 is "
"usually the extent of dimension 0. This is not necessarily true though.")
.def("extent", &Buffer<T>::extent, p::args("self", "dim"),
"Get the size of a dimension.")
.def("min", &Buffer<T>::min, p::args("self", "dim"),
"Get the min coordinate of a dimension. The top left of the "
"buffer represents this point in a function that was realized "
"into this buffer.");
buffer_class
.def("set_min", &buffer_set_min1<T>,
p::args("self", "m0"),
"Set the coordinates corresponding to the host pointer.")
.def("set_min", &buffer_set_min2<T>,
p::args("self", "m0", "m1"),
"Set the coordinates corresponding to the host pointer.")
.def("set_min", &buffer_set_min3<T>,
p::args("self", "m0", "m1", "m2"),
"Set the coordinates corresponding to the host pointer.")
.def("set_min", &buffer_set_min4<T>,
p::args("self", "m0", "m1", "m2", "m3"),
"Set the coordinates corresponding to the host pointer.");
buffer_class
.def("width", &Buffer<T>::width, p::arg("self"),
"Get the extent of dimension 0, which by convention we use as "
"the width of the image. Unlike extent(0), returns one if the "
"buffer is zero-dimensional.")
.def("height", &Buffer<T>::height, p::arg("self"),
"Get the extent of dimension 1, which by convention we use as "
"the height of the image. Unlike extent(1), returns one if the "
"buffer has fewer than two dimensions.")
.def("left", &Buffer<T>::left, p::arg("self"),
"Get the minimum coordinate in dimension 0, which by convention "
"is the coordinate of the left edge of the image. Returns zero "
"for zero-dimensional images.")
.def("right", &Buffer<T>::right, p::arg("self"),
"Get the maximum coordinate in dimension 0, which by convention "
"is the coordinate of the right edge of the image. Returns zero "
"for zero-dimensional images.")
.def("top", &Buffer<T>::top, p::arg("self"),
"Get the minimum coordinate in dimension 1, which by convention "
"is the top of the image. Returns zero for zero- or "
"one-dimensional images.")
.def("bottom", &Buffer<T>::bottom, p::arg("self"),
"Get the maximum coordinate in dimension 1, which by convention "
"is the bottom of the image. Returns zero for zero- or "
"one-dimensional images.");
const char *get_item_doc =
"Construct an expression which loads from this buffer. ";
// Access operators (to Expr, and to actual value)
buffer_class
.def("__getitem__", &buffer_call_operator<Expr, T, Expr>,
p::args("self", "x"),
get_item_doc);
buffer_class
.def("__getitem__", &buffer_call_operator<Expr, T, Expr, Expr>,
p::args("self", "x", "y"),
get_item_doc);
buffer_class
.def("__getitem__", &buffer_call_operator<Expr, T, Expr, Expr, Expr>,
p::args("self", "x", "y", "z"),
get_item_doc)
.def("__getitem__", &buffer_call_operator<Expr, T, Expr, Expr, Expr, Expr>,
p::args("self", "x", "y", "z", "w"),
get_item_doc)
.def("__getitem__", &buffer_call_operator_tuple<T>,
p::args("self", "tuple"),
get_item_doc)
// Note that we return copy values (not references like in the C++ API)
.def("__getitem__", &buffer_call_operator<T, T>,
p::arg("self"),
"Assuming this buffer is zero-dimensional, get its value")
.def("__call__", &buffer_call_operator<T, T, int>,
p::args("self", "x"),
"Assuming this buffer is one-dimensional, get the value of the element at position x")
.def("__call__", &buffer_call_operator<T, T, int, int>,
p::args("self", "x", "y"),
"Assuming this buffer is two-dimensional, get the value of the element at position (x, y)")
.def("__call__", &buffer_call_operator<T, T, int, int, int>,
p::args("self", "x", "y", "z"),
"Assuming this buffer is three-dimensional, get the value of the element at position (x, y, z)")
.def("__call__", &buffer_call_operator<T, T, int, int, int, int>,
p::args("self", "x", "y", "z", "w"),
"Assuming this buffer is four-dimensional, get the value of the element at position (x, y, z, w)")
.def("__setitem__", &buffer_to_setitem_operator0<T>, p::args("self", "x", "value"),
"Assuming this buffer is one-dimensional, set the value of the element at position x")
.def("__setitem__", &buffer_to_setitem_operator1<T>, p::args("self", "x", "y", "value"),
"Assuming this buffer is two-dimensional, set the value of the element at position (x, y)")
.def("__setitem__", &buffer_to_setitem_operator2<T>, p::args("self", "x", "y", "z", "value"),
"Assuming this buffer is three-dimensional, set the value of the element at position (x, y, z)")
.def("__setitem__", &buffer_to_setitem_operator3<T>, p::args("self", "x", "y", "z", "w", "value"),
"Assuming this buffer is four-dimensional, set the value of the element at position (x, y, z, w)")
.def("__setitem__", &buffer_to_setitem_operator4<T>, p::args("self", "tuple", "value"),
"Assuming this buffer is one to four-dimensional, "
"set the value of the element at position indicated by tuple (x, y, z, w)");
p::implicitly_convertible<Buffer<T>, h::Argument>();
return;
}
p::object buffer_to_python_object(const h::Buffer<> &im) {
PyObject *obj = nullptr;
if (im.type() == h::UInt(8)) {
p::manage_new_object::apply<h::Buffer<uint8_t> *>::type converter;
obj = converter(new h::Buffer<uint8_t>(im));
} else if (im.type() == h::UInt(16)) {
p::manage_new_object::apply<h::Buffer<uint16_t> *>::type converter;
obj = converter(new h::Buffer<uint16_t>(im));
} else if (im.type() == h::UInt(32)) {
p::manage_new_object::apply<h::Buffer<uint32_t> *>::type converter;
obj = converter(new h::Buffer<uint32_t>(im));
} else if (im.type() == h::Int(8)) {
p::manage_new_object::apply<h::Buffer<int8_t> *>::type converter;
obj = converter(new h::Buffer<int8_t>(im));
} else if (im.type() == h::Int(16)) {
p::manage_new_object::apply<h::Buffer<int16_t> *>::type converter;
obj = converter(new h::Buffer<int16_t>(im));
} else if (im.type() == h::Int(32)) {
p::manage_new_object::apply<h::Buffer<int32_t> *>::type converter;
obj = converter(new h::Buffer<int32_t>(im));
} else if (im.type() == h::Float(32)) {
p::manage_new_object::apply<h::Buffer<float> *>::type converter;
obj = converter(new h::Buffer<float>(im));
} else if (im.type() == h::Float(64)) {
p::manage_new_object::apply<h::Buffer<double> *>::type converter;
obj = converter(new h::Buffer<double>(im));
} else {
throw std::invalid_argument("buffer_to_python_object received an Buffer of unsupported type.");
}
return p::object(p::handle<>(obj));
}
h::Buffer<> python_object_to_buffer(p::object obj) {
p::extract<h::Buffer<uint8_t>> buffer_extract_uint8(obj);
p::extract<h::Buffer<uint16_t>> buffer_extract_uint16(obj);
p::extract<h::Buffer<uint32_t>> buffer_extract_uint32(obj);
p::extract<h::Buffer<int8_t>> buffer_extract_int8(obj);
p::extract<h::Buffer<int16_t>> buffer_extract_int16(obj);
p::extract<h::Buffer<int32_t>> buffer_extract_int32(obj);
p::extract<h::Buffer<float>> buffer_extract_float(obj);
p::extract<h::Buffer<double>> buffer_extract_double(obj);
if (buffer_extract_uint8.check()) {
return buffer_extract_uint8();
} else if (buffer_extract_uint16.check()) {
return buffer_extract_uint16();
} else if (buffer_extract_uint32.check()) {
return buffer_extract_uint32();
} else if (buffer_extract_int8.check()) {
return buffer_extract_int8();
} else if (buffer_extract_int16.check()) {
return buffer_extract_int16();
} else if (buffer_extract_int32.check()) {
return buffer_extract_int32();
} else if (buffer_extract_float.check()) {
return buffer_extract_float();
} else if (buffer_extract_double.check()) {
return buffer_extract_double();
} else {
throw std::invalid_argument("python_object_to_buffer received an object that is not an Buffer<T>");
}
return h::Buffer<>();
}
#ifdef USE_NUMPY
<|fim▁hole|> if (t == h::Int(8)) return bn::dtype::get_builtin<int8_t>();
if (t == h::Int(16)) return bn::dtype::get_builtin<int16_t>();
if (t == h::Int(32)) return bn::dtype::get_builtin<int32_t>();
if (t == h::Float(32)) return bn::dtype::get_builtin<float>();
if (t == h::Float(64)) return bn::dtype::get_builtin<double>();
throw std::runtime_error("type_to_dtype received a Halide::Type with no known numpy dtype equivalent");
return bn::dtype::get_builtin<uint8_t>();
}
h::Type dtype_to_type(const bn::dtype &t) {
if (t == bn::dtype::get_builtin<uint8_t>()) return h::UInt(8);
if (t == bn::dtype::get_builtin<uint16_t>()) return h::UInt(16);
if (t == bn::dtype::get_builtin<uint32_t>()) return h::UInt(32);
if (t == bn::dtype::get_builtin<int8_t>()) return h::Int(8);
if (t == bn::dtype::get_builtin<int16_t>()) return h::Int(16);
if (t == bn::dtype::get_builtin<int32_t>()) return h::Int(32);
if (t == bn::dtype::get_builtin<float>()) return h::Float(32);
if (t == bn::dtype::get_builtin<double>()) return h::Float(64);
throw std::runtime_error("dtype_to_type received a numpy type with no known Halide type equivalent");
return h::Type();
}
/// Will create a Halide::Buffer object pointing to the array data
p::object ndarray_to_buffer(bn::ndarray &array) {
h::Type t = dtype_to_type(array.get_dtype());
const int dims = array.get_nd();
void *host = reinterpret_cast<void *>(array.get_data());
halide_dimension_t shape[dims];
for (int i = 0; i < dims; i++) {
shape[i].min = 0;
shape[i].extent = array.shape(i);
shape[i].stride = array.strides(i) / t.bytes();
}
return buffer_to_python_object(h::Buffer<>(t, host, dims, shape));
}
bn::ndarray buffer_to_ndarray(p::object buffer_object) {
h::Buffer<> im = python_object_to_buffer(buffer_object);
user_assert(im.data() != nullptr)
<< "buffer_to_ndarray received an buffer without host data";
std::vector<int32_t> extent(im.dimensions()), stride(im.dimensions());
for (int i = 0; i < im.dimensions(); i++) {
extent[i] = im.dim(i).extent();
stride[i] = im.dim(i).stride() * im.type().bytes();
}
return bn::from_data(
im.host_ptr(),
type_to_dtype(im.type()),
extent,
stride,
buffer_object);
}
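// Illustrative round trip through the two helpers above, seen from the Python side
// (assumes this extension module is importable as "halide" and was built with USE_NUMPY):
//   import numpy as np, halide
//   arr = np.zeros((480, 640), dtype=np.uint8)
//   buf = halide.ndarray_to_buffer(arr)   # wraps arr's memory, no copy
//   arr2 = halide.buffer_to_ndarray(buf)  # ndarray view over the same host data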
#endif
struct BufferFactory {
template <typename T, typename... Args>
static p::object create_buffer_object(Args... args) {
typedef h::Buffer<T> BufferType;
typedef typename p::manage_new_object::apply<BufferType *>::type converter_t;
converter_t converter;
PyObject *obj = converter(new BufferType(args...));
return p::object(p::handle<>(obj));
}
template <typename... Args>
static p::object create_buffer_impl(h::Type t, Args... args) {
if (t == h::UInt(8)) return create_buffer_object<uint8_t>(args...);
if (t == h::UInt(16)) return create_buffer_object<uint16_t>(args...);
if (t == h::UInt(32)) return create_buffer_object<uint32_t>(args...);
if (t == h::Int(8)) return create_buffer_object<int8_t>(args...);
if (t == h::Int(16)) return create_buffer_object<int16_t>(args...);
if (t == h::Int(32)) return create_buffer_object<int32_t>(args...);
if (t == h::Float(32)) return create_buffer_object<float>(args...);
if (t == h::Float(64)) return create_buffer_object<double>(args...);
throw std::invalid_argument("BufferFactory::create_buffer_impl received type not handled");
return p::object();
}
static p::object create_buffer0(h::Type type) {
return create_buffer_impl(type);
}
static p::object create_buffer1(h::Type type, int x) {
return create_buffer_impl(type, x);
}
static p::object create_buffer2(h::Type type, int x, int y) {
return create_buffer_impl(type, x, y);
}
static p::object create_buffer3(h::Type type, int x, int y, int z) {
return create_buffer_impl(type, x, y, z);
}
static p::object create_buffer4(h::Type type, int x, int y, int z, int w) {
return create_buffer_impl(type, x, y, z, w);
}
static p::object create_buffer_from_realization(h::Type type, h::Realization &r) {
return create_buffer_impl(type, r);
}
static p::object create_buffer_from_buffer(h::Type type, buffer_t b) {
return create_buffer_impl(type, b);
}
};
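// Sketch of the dispatch performed by the factory above, as seen from Python through the
// p::def("Buffer", ...) overloads registered in defineBuffer() below (module and type
// names are assumed, not defined in this file):
//   halide.Buffer(halide.UInt(8), 640, 480)  -> create_buffer2 -> h::Buffer<uint8_t>(640, 480)
//   halide.Buffer(halide.Float(32))          -> create_buffer0 -> h::Buffer<float>()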
void defineBuffer() {
defineBuffer_impl<uint8_t>("_uint8", h::UInt(8));
defineBuffer_impl<uint16_t>("_uint16", h::UInt(16));
defineBuffer_impl<uint32_t>("_uint32", h::UInt(32));
defineBuffer_impl<int8_t>("_int8", h::Int(8));
defineBuffer_impl<int16_t>("_int16", h::Int(16));
defineBuffer_impl<int32_t>("_int32", h::Int(32));
defineBuffer_impl<float>("_float32", h::Float(32));
defineBuffer_impl<double>("_float64", h::Float(64));
// "Buffer" will look as a class, but instead it will be simply a factory method
p::def("Buffer", &BufferFactory::create_buffer0,
p::args("type"),
"Construct a zero-dimensional buffer of type T");
p::def("Buffer", &BufferFactory::create_buffer1,
p::args("type", "x"),
"Construct a one-dimensional buffer of type T");
p::def("Buffer", &BufferFactory::create_buffer2,
p::args("type", "x", "y"),
"Construct a two-dimensional buffer of type T");
p::def("Buffer", &BufferFactory::create_buffer3,
p::args("type", "x", "y", "z"),
"Construct a three-dimensional buffer of type T");
p::def("Buffer", &BufferFactory::create_buffer4,
p::args("type", "x", "y", "z", "w"),
"Construct a four-dimensional buffer of type T");
p::def("Buffer", &BufferFactory::create_buffer_from_realization,
p::args("type", "r"),
p::with_custodian_and_ward_postcall<0, 2>(), // the realization reference count is increased
"Wrap a single-element realization in an Buffer object of type T.");
p::def("Buffer", &BufferFactory::create_buffer_from_buffer,
p::args("type", "b"),
p::with_custodian_and_ward_postcall<0, 2>(), // the buffer_t reference count is increased
"Wrap a buffer_t in an Buffer object of type T, so that we can access its pixels.");
#ifdef USE_NUMPY
bn::initialize();
p::def("ndarray_to_buffer", &ndarray_to_buffer,
p::args("array"),
p::with_custodian_and_ward_postcall<0, 1>(), // the array reference count is increased
"Converts a numpy array into a Halide::Buffer."
"Will take into account the array size, dimensions, and type."
"Created Buffer refers to the array data (no copy).");
p::def("Buffer", &ndarray_to_buffer,
p::args("array"),
p::with_custodian_and_ward_postcall<0, 1>(), // the array reference count is increased
"Wrap numpy array in a Halide::Buffer."
"Will take into account the array size, dimensions, and type."
"Created Buffer refers to the array data (no copy).");
p::def("buffer_to_ndarray", &buffer_to_ndarray,
p::args("buffer"),
p::with_custodian_and_ward_postcall<0, 1>(), // the buffer reference count is increased
"Creates a numpy array from a Halide::Buffer."
"Will take into account the Buffer size, dimensions, and type."
"Created ndarray refers to the Buffer data (no copy).");
#endif
return;
}<|fim▁end|> | bn::dtype type_to_dtype(const h::Type &t) {
if (t == h::UInt(8)) return bn::dtype::get_builtin<uint8_t>();
if (t == h::UInt(16)) return bn::dtype::get_builtin<uint16_t>();
if (t == h::UInt(32)) return bn::dtype::get_builtin<uint32_t>(); |
<|file_name|>auth.service.ts<|end_file_name|><|fim▁begin|>import { Injectable } from '@angular/core';
import { Subject } from 'rxjs/Subject';
import { Observable } from 'rxjs/Observable';<|fim▁hole|>import {Http, Headers} from '@angular/http';
import {Router } from '@angular/router';
import 'rxjs/add/operator/map';
@Injectable()
export class AuthService {
submission: FirebaseListObservable<any>;
constructor(public afAuth: AngularFireAuth, private db: AngularFireDatabase, private router: Router){
console.log("Authentication service started");
console.log(firebase.auth());
}
login(email, pass){
this.afAuth.auth.signInWithEmailAndPassword(email, pass)
.then(res => {
console.log('Nice, logging you in!!!');
this.router.navigate(['/admin']);
});
}
checkAuth(){
this.afAuth.authState.subscribe(res => {
if (res && res.uid) {
console.log('user is logged in');
return true;
} else {
console.log('user not logged in...redirecting to welcome..');
this.router.navigate(['/login']);
return false;
}
});
}
logout() {
this.afAuth.auth.signOut();
this.router.navigate(['/']);
}
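  // Illustrative usage from a component or route guard (component names are assumed,
  // not part of this service): inject it via the constructor and delegate to the methods above, e.g.
  //   constructor(private auth: AuthService) {}
  //   onSubmit() { this.auth.login(this.email, this.password); }
  //   onLogout() { this.auth.logout(); }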
}<|fim▁end|> | import { AngularFireDatabase, FirebaseListObservable } from 'angularfire2/database';
import { AngularFireAuth } from 'angularfire2/auth';
import * as firebase from 'firebase/app'; |
<|file_name|>local_settings.py<|end_file_name|><|fim▁begin|>import os
import pymongo
import imp
<|fim▁hole|># django stuff
reference_data_dir = '../data/reference_data'
#DEBUG = True
#COMPRESS_ENABLED = False
GENERATED_FILES_DIR = os.path.join(os.path.dirname(__file__), 'generated_files')
MEDIA_ROOT = os.path.join(GENERATED_FILES_DIR , 'media/')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': os.environ.get('POSTGRES_SERVICE_HOSTNAME', 'localhost'),
'PORT': int(os.environ.get('POSTGRES_SERVICE_PORT', '5432')),
'NAME': 'seqrdb',
'USER': os.environ.get('POSTGRES_USERNAME', 'postgres'),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD', ''),
}
}
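# Illustrative only: because the connection settings above are read from the environment,
# the database can be redirected without editing this file, e.g.
#   export POSTGRES_SERVICE_HOSTNAME=db.internal POSTGRES_SERVICE_PORT=5433
#   export POSTGRES_USERNAME=seqr POSTGRES_PASSWORD=secret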
ALLOWED_HOSTS = ['*']
EMAIL_BACKEND = "anymail.backends.postmark.EmailBackend"
DEFAULT_FROM_EMAIL = "[email protected]"
ANYMAIL = {
#"SENDGRID_API_KEY": os.environ.get('SENDGRID_API_KEY', 'sendgrid-api-key-placeholder'),
"POSTMARK_SERVER_TOKEN": os.environ.get('POSTMARK_SERVER_TOKEN', 'postmark-server-token-placeholder'),
}
#
# xbrowse stuff
#
REFERENCE_SETTINGS = imp.load_source(
'reference_settings',
os.path.dirname(os.path.realpath(__file__)) + '/reference_settings.py'
)
CUSTOM_ANNOTATOR_SETTINGS = imp.load_source(
'custom_annotation_settings',
os.path.dirname(os.path.realpath(__file__)) + '/custom_annotator_settings.py'
)
ANNOTATOR_SETTINGS = imp.load_source(
'annotator_settings',
os.path.dirname(os.path.realpath(__file__)) + '/annotator_settings.py'
)
_conn = pymongo.MongoClient(host=os.environ.get('MONGO_SERVICE_HOSTNAME', 'localhost:27017'))
DATASTORE_DB = _conn['xbrowse_datastore']
POPULATION_DATASTORE_DB = _conn['xbrowse_pop_datastore']
DEFAULT_CONTROL_COHORT = 'controls'
CONTROL_COHORTS = [
{
'slug': 'controls',
'vcf': '',
},
]
UPLOADED_PEDIGREE_FILE_RECIPIENTS = []
COVERAGE_DB = _conn['xbrowse_coverage']
PROJECT_DATASTORE_DB = _conn['xbrowse_proj_store']
CNV_STORE_DB_NAME = 'xbrowse_cnvs'
CUSTOM_POPULATIONS_DB = _conn['xcustom_refpops']
COVERAGE_DB = _conn['coverage']
READ_VIZ_BAM_PATH = 'https://broad-seqr'
READ_VIZ_CRAM_PATH = 'broad-seqr:5000'
READ_VIZ_USERNAME = "xbrowse-bams"
READ_VIZ_PASSWD = "xbrowse-bams"
ADMINS = [
('Ben Weisburd', '[email protected]'),
('Hana Snow', '[email protected]'),
]<|fim▁end|> | |
<|file_name|>testrun.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import logging
from operator import methodcaller
from typing import List
from django.core.exceptions import ObjectDoesNotExist
from kobo.django.xmlrpc.decorators import user_passes_test
from tcms.issuetracker.models import Issue
from tcms.management.models import TCMSEnvValue, TestTag
from tcms.testcases.models import TestCase
from tcms.testruns.models import TestCaseRun, TestRun
from tcms.xmlrpc.decorators import log_call
from tcms.xmlrpc.utils import distinct_count, pre_process_estimated_time, pre_process_ids
__all__ = (
"add_cases",
"add_tag",
"create",
"env_value",
"filter",
"filter_count",
"get",
"get_issues",
"get_change_history",
"get_completion_report",
"get_env_values",
"get_tags",
"get_test_case_runs",
"get_test_cases",
"get_test_plan",
"link_env_value",
"remove_cases",
"remove_tag",
"unlink_env_value",
"update",
)
__xmlrpc_namespace__ = "TestRun"
logger = logging.getLogger(__name__)
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.add_testcaserun"))
def add_cases(request, run_ids, case_ids):
"""Add one or more cases to the selected test runs.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param case_ids: give one or more case IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a case ID.
:type case_ids: int, str or list
:return: a list which is empty on success or a list of mappings with
    failure codes if a failure occurred.
:rtype: list
Example::
# Add case id 10 to run 1
TestRun.add_cases(1, 10)
# Add case ids list [10, 20] to run list [1, 2]
TestRun.add_cases([1, 2], [10, 20])
# Add case ids list '10, 20' to run list '1, 2' with String
TestRun.add_cases('1, 2', '10, 20')
"""
trs = TestRun.objects.filter(run_id__in=pre_process_ids(run_ids))
tcs = TestCase.objects.filter(case_id__in=pre_process_ids(case_ids))
for tr in trs.iterator():
for tc in tcs.iterator():
tr.add_case_run(case=tc)
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.delete_testcaserun"))
def remove_cases(request, run_ids, case_ids):
"""Remove one or more cases from the selected test runs.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param case_ids: give one or more case IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a case ID.
:type case_ids: int, str or list
:return: a list which is empty on success or a list of mappings with
    failure codes if a failure occurred.
:rtype: list
Example::
# Remove case 10 from run 1
TestRun.remove_cases(1, 10)
# Remove case ids list [10, 20] from run list [1, 2]
TestRun.remove_cases([1, 2], [10, 20])
# Remove case ids list '10, 20' from run list '1, 2' with String
TestRun.remove_cases('1, 2', '10, 20')
"""
trs = TestRun.objects.filter(run_id__in=pre_process_ids(run_ids))
for tr in trs.iterator():
crs = TestCaseRun.objects.filter(run=tr, case__in=pre_process_ids(case_ids))
crs.delete()
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.add_testruntag"))
def add_tag(request, run_ids, tags):
"""Add one or more tags to the selected test runs.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
    :param tags: tag name or a list of tag names to add.
:type tags: str or list
:return: a list which is empty on success or a list of mappings with
    failure codes if a failure occurred.
:rtype: list
Example::
# Add tag 'foobar' to run 1
TestPlan.add_tag(1, 'foobar')
# Add tag list ['foo', 'bar'] to run list [1, 2]
TestPlan.add_tag([1, 2], ['foo', 'bar'])
# Add tag list ['foo', 'bar'] to run list [1, 2] with String
TestPlan.add_tag('1, 2', 'foo, bar')
"""
trs = TestRun.objects.filter(pk__in=pre_process_ids(value=run_ids))
tags: List[str] = TestTag.string_to_list(tags)
for tag in tags:
t, _ = TestTag.objects.get_or_create(name=tag)
tr: TestRun
for tr in trs.iterator():
tr.add_tag(tag=t)
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.add_testrun"))
def create(request, values):
"""Creates a new Test Run object and stores it in the database.
:param dict values: a mapping containing these data to create a test run.
* plan: (int) **Required** ID of test plan
* build: (int)/(str) **Required** ID of Build
* manager: (int) **Required** ID of run manager
* summary: (str) **Required**
* product: (int) **Required** ID of product
* product_version: (int) **Required** ID of product version
* default_tester: (int) optional ID of run default tester
* plan_text_version: (int) optional
* estimated_time: (str) optional, could be in format ``2h30m30s``, which is recommended or ``HH:MM:SS``.
* notes: (str) optional
* status: (int) optional 0:RUNNING 1:STOPPED (default 0)
* case: list or (str) optional list of case ids to add to the run
* tag: list or (str) optional list of tag to add to the run
:return: a mapping representing newly created :class:`TestRun`.
:rtype: dict
.. versionchanged:: 4.5
Argument ``errata_id`` is removed.
Example::
values = {
'build': 2,
'manager': 1,
'plan': 1,
'product': 1,
'product_version': 2,
'summary': 'Testing XML-RPC for TCMS',
}
TestRun.create(values)
"""
from datetime import datetime
from tcms.core import forms
from tcms.testruns.forms import XMLRPCNewRunForm
if not values.get("product"):
raise ValueError("Value of product is required")
    # TODO: XMLRPC only accepts HH:MM:SS rather than DdHhMm
if values.get("estimated_time"):
values["estimated_time"] = pre_process_estimated_time(values.get("estimated_time"))
if values.get("case"):
values["case"] = pre_process_ids(value=values["case"])
form = XMLRPCNewRunForm(values)
form.populate(product_id=values["product"])
if form.is_valid():
tr = TestRun.objects.create(
product_version=form.cleaned_data["product_version"],
plan_text_version=form.cleaned_data["plan_text_version"],
stop_date=form.cleaned_data["status"] and datetime.now() or None,
summary=form.cleaned_data["summary"],
notes=form.cleaned_data["notes"],
estimated_time=form.cleaned_data["estimated_time"],
plan=form.cleaned_data["plan"],
build=form.cleaned_data["build"],
manager=form.cleaned_data["manager"],
default_tester=form.cleaned_data["default_tester"],
)
if form.cleaned_data["case"]:
for c in form.cleaned_data["case"]:
tr.add_case_run(case=c)
del c
if form.cleaned_data["tag"]:
tags = form.cleaned_data["tag"]
tags = [c.strip() for c in tags.split(",") if c]
for tag in tags:
t, c = TestTag.objects.get_or_create(name=tag)
tr.add_tag(tag=t)
del tag, t, c
else:
raise ValueError(forms.errors_to_list(form))
return tr.serialize()
def __env_value_operation(request, action: str, run_ids, env_value_ids):
trs = TestRun.objects.filter(pk__in=pre_process_ids(value=run_ids))
evs = TCMSEnvValue.objects.filter(pk__in=pre_process_ids(value=env_value_ids))
for tr in trs.iterator():
for ev in evs.iterator():
try:
func = getattr(tr, action + "_env_value")
func(env_value=ev)
except ObjectDoesNotExist:
logger.debug(
"User %s wants to remove property value %r from test run %r, "
"however this test run does not have that value.",
request.user,
ev,
tr,
)
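# Illustrative note: with action="add" the getattr() call above resolves to TestRun.add_env_value,
# and with action="remove" to TestRun.remove_env_value (the actions passed in by the callers below);
# a mapping that is already missing on removal is only logged, not raised.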
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.change_tcmsenvrunvaluemap"))
def env_value(request, action, run_ids, env_value_ids):
"""
    Add or remove env values for the given runs. This function behaves the same as
    link_env_value or unlink_env_value.
:param str action: what action to do, ``add`` or ``remove``.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param env_value_ids: give one or more environment value IDs. It could be
an integer, a string containing comma separated IDs, or a list of int
    each of them is an environment value ID.
:type env_value_ids: int, str or list
:return: a list which is empty on success or a list of mappings with
    failure codes if a failure occurred.
:rtype: list
Example::
# Add env value 20 to run id 8
TestRun.env_value('add', 8, 20)
"""
__env_value_operation(request, action, run_ids, env_value_ids)
@log_call(namespace=__xmlrpc_namespace__)
def filter(request, values={}):
"""Performs a search and returns the resulting list of test runs.
:param dict values: a mapping containing these criteria.
* build: ForeignKey: TestBuild
* cc: ForeignKey: Auth.User
* env_value: ForeignKey: Environment Value
* default_tester: ForeignKey: Auth.User
* run_id: (int)
* manager: ForeignKey: Auth.User
* notes: (str)
* plan: ForeignKey: TestPlan
* summary: (str)
* tag: ForeignKey: Tag
* product_version: ForeignKey: Version
:return: list of mappings of found :class:`TestRun`.
:rtype: list
Example::
# Get all of runs contain 'TCMS' in summary
TestRun.filter({'summary__icontain': 'TCMS'})
# Get all of runs managed by xkuang
TestRun.filter({'manager__username': 'xkuang'})
# Get all of runs the manager name starts with x
TestRun.filter({'manager__username__startswith': 'x'})
# Get runs contain the case ID 1, 2, 3
TestRun.filter({'case_run__case__case_id__in': [1, 2, 3]})
"""
return TestRun.to_xmlrpc(values)
@log_call(namespace=__xmlrpc_namespace__)
def filter_count(request, values={}):
"""Performs a search and returns the resulting count of runs.
:param dict values: a mapping containing criteria. See also
:meth:`TestRun.filter <tcms.xmlrpc.api.testrun.filter>`.
:return: total matching runs.
:rtype: int
.. seealso::
See examples of :meth:`TestRun.filter <tcms.xmlrpc.api.testrun.filter>`.
"""
return distinct_count(TestRun, values)
@log_call(namespace=__xmlrpc_namespace__)
def get(request, run_id):
"""Used to load an existing test run from the database.
:param int run_id: test run ID.
:return: a mapping representing found :class:`TestRun`.
:rtype: dict
Example::
TestRun.get(1)
"""
try:
tr = TestRun.objects.get(run_id=run_id)
except TestRun.DoesNotExist as error:
return error
response = tr.serialize()
# get the xmlrpc tags
tag_ids = tr.tag.values_list("id", flat=True)
query = {"id__in": tag_ids}
tags = TestTag.to_xmlrpc(query)
# cut 'id' attribute off, only leave 'name' here
tags_without_id = [tag["name"] for tag in tags]
# replace tag_id list in the serialize return data
response["tag"] = tags_without_id
return response
@log_call(namespace=__xmlrpc_namespace__)
def get_issues(request, run_ids):
"""Get the list of issues attached to this run.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:return: a list of mappings of :class:`Issue <tcms.issuetracker.models.Issue>`.
:rtype: list[dict]
Example::
        # Get issues belonging to run ID 1
TestRun.get_issues(1)
# Get issues belonging to run ids list [1, 2]
TestRun.get_issues([1, 2])
# Get issues belonging to run ids list 1 and 2 with string
TestRun.get_issues('1, 2')
"""
query = {"case_run__run__in": pre_process_ids(run_ids)}
return Issue.to_xmlrpc(query)
@log_call(namespace=__xmlrpc_namespace__)
def get_change_history(request, run_id):
"""Get the list of changes to the fields of this run.
:param int run_id: run ID.
:return: list of mapping with changed fields and their details.
:rtype: list
.. warning::
NOT IMPLEMENTED - History is different than before.
"""
raise NotImplementedError("Not implemented RPC method") # pragma: no cover
@log_call(namespace=__xmlrpc_namespace__)
def get_completion_report(request, run_ids):
"""Get a report of the current status of the selected runs combined.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:return: A mapping containing counts and percentages of the combined totals
of case-runs in the run. Counts only the most recently statused
case-run for a given build and environment.
:rtype: dict
.. warning::
NOT IMPLEMENTED
"""
raise NotImplementedError("Not implemented RPC method") # pragma: no cover
@log_call(namespace=__xmlrpc_namespace__)
def get_env_values(request, run_id):
"""Get the list of env values to this run.
<|fim▁hole|> :param int run_id: run ID.
:return: a list of mappings representing found :class:`TCMSEnvValue`.
:rtype: List[dict]
Example::
TestRun.get_env_values(8)
"""
from tcms.management.models import TCMSEnvValue
# FIXME: return [] if run_id is None or ""
query = {"testrun__pk": run_id}
return TCMSEnvValue.to_xmlrpc(query)
@log_call(namespace=__xmlrpc_namespace__)
def get_tags(request, run_id):
"""Get the list of tags attached to this run.
:param int run_id: run ID.
:return: a mapping representing found :class:`TestTag`.
:rtype: dict
Example::
TestRun.get_tags(1)
"""
tr = TestRun.objects.get(run_id=run_id)
tag_ids = tr.tag.values_list("id", flat=True)
query = {"id__in": tag_ids}
return TestTag.to_xmlrpc(query)
@log_call(namespace=__xmlrpc_namespace__)
def get_test_case_runs(request, run_id):
"""Get the list of cases that this run is linked to.
:param int run_id: run ID.
:return: a list of mappings of found :class:`TestCaseRun`.
:rtype: list[dict]
Example::
# Get all of case runs
TestRun.get_test_case_runs(1)
"""
return TestCaseRun.to_xmlrpc({"run__run_id": run_id})
@log_call(namespace=__xmlrpc_namespace__)
def get_test_cases(request, run_id):
"""Get the list of cases that this run is linked to.
:param int run_id: run ID.
:return: a list of mappings of found :class:`TestCase`.
:rtype: list[dict]
Example::
TestRun.get_test_cases(1)
"""
tcs_serializer = TestCase.to_xmlrpc(query={"case_run__run_id": run_id})
qs = TestCaseRun.objects.filter(run_id=run_id).values("case", "pk", "case_run_status__name")
extra_info = {row["case"]: row for row in qs.iterator()}
for case in tcs_serializer:
info = extra_info[case["case_id"]]
case["case_run_id"] = info["pk"]
case["case_run_status"] = info["case_run_status__name"]
return tcs_serializer
@log_call(namespace=__xmlrpc_namespace__)
def get_test_plan(request, run_id):
"""Get the plan that this run is associated with.
:param int run_id: run ID.
:return: a mapping of found :class:`TestPlan`.
:rtype: dict
Example::
TestRun.get_test_plan(1)
"""
return TestRun.objects.select_related("plan").get(run_id=run_id).plan.serialize()
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.delete_testruntag"))
def remove_tag(request, run_ids, tags):
"""Remove a tag from a run.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param tags: tag name or a list of tag names to remove.
:type tags: str or list
:return: a list which is empty on success.
:rtype: list
Example::
# Remove tag 'foo' from run 1
TestRun.remove_tag(1, 'foo')
# Remove tag 'foo' and 'bar' from run list [1, 2]
TestRun.remove_tag([1, 2], ['foo', 'bar'])
# Remove tag 'foo' and 'bar' from run list '1, 2' with String
TestRun.remove_tag('1, 2', 'foo, bar')
"""
trs = TestRun.objects.filter(run_id__in=pre_process_ids(value=run_ids))
tgs = TestTag.objects.filter(name__in=TestTag.string_to_list(tags))
tr: TestRun
for tr in trs.iterator():
for tg in tgs.iterator():
tr.remove_tag(tag=tg)
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.change_testrun"))
def update(request, run_ids, values):
"""Updates the fields of the selected test run.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param dict values: a mapping containing these data to update specified
runs.
* plan: (int) TestPlan.plan_id
* product: (int) Product.id
* build: (int) Build.id
* manager: (int) Auth.User.id
        * default_tester: (int) Auth.User.id
* summary: (str)
* estimated_time: (TimeDelta) in format ``2h30m30s`` which is recommended or ``HH:MM:SS``.
* product_version: (int)
* plan_text_version: (int)
* notes: (str)
* status: (int) 0:RUNNING 1:FINISHED
:return: list of mappings of the updated test runs.
:rtype: list[dict]
.. versionchanged:: 4.5
Argument ``errata_id`` is removed.
Example::
# Update status to finished for run 1 and 2
TestRun.update([1, 2], {'status': 1})
"""
from datetime import datetime
from tcms.core import forms
from tcms.testruns.forms import XMLRPCUpdateRunForm
if values.get("product_version") and not values.get("product"):
raise ValueError('Field "product" is required by product_version')
if values.get("estimated_time"):
values["estimated_time"] = pre_process_estimated_time(values.get("estimated_time"))
form = XMLRPCUpdateRunForm(values)
if values.get("product_version"):
form.populate(product_id=values["product"])
if form.is_valid():
trs = TestRun.objects.filter(pk__in=pre_process_ids(value=run_ids))
_values = dict()
if form.cleaned_data["plan"]:
_values["plan"] = form.cleaned_data["plan"]
if form.cleaned_data["build"]:
_values["build"] = form.cleaned_data["build"]
if form.cleaned_data["manager"]:
_values["manager"] = form.cleaned_data["manager"]
if "default_tester" in values:
default_tester = form.cleaned_data["default_tester"]
if values.get("default_tester") and default_tester:
_values["default_tester"] = default_tester
else:
_values["default_tester"] = None
if form.cleaned_data["summary"]:
_values["summary"] = form.cleaned_data["summary"]
if values.get("estimated_time") is not None:
_values["estimated_time"] = form.cleaned_data["estimated_time"]
if form.cleaned_data["product_version"]:
_values["product_version"] = form.cleaned_data["product_version"]
if "notes" in values:
if values["notes"] in (None, ""):
_values["notes"] = values["notes"]
if form.cleaned_data["notes"]:
_values["notes"] = form.cleaned_data["notes"]
if form.cleaned_data["plan_text_version"]:
_values["plan_text_version"] = form.cleaned_data["plan_text_version"]
if isinstance(form.cleaned_data["status"], int):
if form.cleaned_data["status"]:
_values["stop_date"] = datetime.now()
else:
_values["stop_date"] = None
trs.update(**_values)
else:
raise ValueError(forms.errors_to_list(form))
query = {"pk__in": trs.values_list("pk", flat=True)}
return TestRun.to_xmlrpc(query)
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.add_tcmsenvrunvaluemap"))
def link_env_value(request, run_ids, env_value_ids):
"""Link env values to the given runs.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param env_value_ids: give one or more environment value IDs. It could be
an integer, a string containing comma separated IDs, or a list of int
    each of them is an environment value ID.
:type env_value_ids: int, str or list
:return: a list which is empty on success or a list of mappings with
    failure codes if a failure occurred.
:rtype: list
Example::
# Add env value 1 to run id 2
TestRun.link_env_value(2, 1)
"""
return __env_value_operation(request, "add", run_ids, env_value_ids)
@log_call(namespace=__xmlrpc_namespace__)
@user_passes_test(methodcaller("has_perm", "testruns.delete_tcmsenvrunvaluemap"))
def unlink_env_value(request, run_ids, env_value_ids):
"""Unlink env values to the given runs.
:param run_ids: give one or more run IDs. It could be an integer, a
string containing comma separated IDs, or a list of int each of them is
a run ID.
:type run_ids: int, str or list
:param env_value_ids: give one or more environment value IDs. It could be
an integer, a string containing comma separated IDs, or a list of int
    each of them is an environment value ID.
:type env_value_ids: int, str or list
:return: a list which is empty on success or a list of mappings with
    failure codes if a failure occurred.
:rtype: list
Example::
# Unlink env value 1 to run id 2
TestRun.unlink_env_value(2, 1)
"""
return __env_value_operation(request, "remove", run_ids, env_value_ids)<|fim▁end|> | |
<|file_name|>ZkPaths.ts<|end_file_name|><|fim▁begin|>/**
* @module node-zk-treecache
*/
/** */
import { validatePath } from "./PathUtils"
import { Client, ACL, Id, CreateMode, Exception } from "node-zookeeper-client"
const PATH_SEPARATOR = "/"
const ANYONE_ID_UNSAFE = new Id("world", "anyone")
const OPEN_ACL_UNSAFE = [new ACL(31, ANYONE_ID_UNSAFE)]
const EMPTY_CALLBACK = () => {
return
}
const DEFAULT_ACL_PROVIDER: ACLProvider = {
getDefaultAcl() {
return null
},
getAclForPath() {
return null
}
}
/**
* @private
* @param path the path to split
*/
export function split(path: string): string[] {
validatePath(path)
return path.split(PATH_SEPARATOR).filter(s => s.length > 0)
}
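// For reference (derived from the implementation above, illustrative):
//   split("/one/two/three") -> ["one", "two", "three"]
//   split("/")              -> []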
export interface ACLProvider {
getDefaultAcl(): ACL[] | null
getAclForPath(path: string): ACL[] | null
}
// Check whether we are running ZK 3.5+, in which case the container create mode (code 4) could be
// used; for now persistent nodes are always created and the asContainers flag is ignored.
function getCreateMode(asContainers: boolean) {
return CreateMode.PERSISTENT
}
/**
* Make sure all the nodes in the path are created. NOTE: Unlike File.mkdirs(), Zookeeper doesn't distinguish
* between directories and files. So, every node in the path is created. The data for each node is an empty blob
*
* @private
* @param zkClient the client
* @param path path to ensure
* @param makeLastNode if true, all nodes are created. If false, only the parent nodes are created
* @param aclProvider if not null, the ACL provider to use when creating parent nodes
* @param asContainers if true, nodes are created as {@link CreateMode#CONTAINER} (need ZK > 3.5)
* @param cb the callback to call after having created the path
*/
export function mkdirs(
zkClient: Client,
path: string,
makeLastNode: boolean,
aclProvider: ACLProvider | null,
asContainers: boolean,
cb?: (err: Error | Exception, p: string) => void
) {
validatePath(path)
let s = split(path)
if (!makeLastNode) {
s.pop()
}
path = "/" + s.join("/")
const mode = getCreateMode(asContainers)
aclProvider = aclProvider || DEFAULT_ACL_PROVIDER
const acl =
aclProvider.getAclForPath(path) ||
aclProvider.getDefaultAcl() ||
OPEN_ACL_UNSAFE
zkClient.mkdirp(path, acl, mode, cb || EMPTY_CALLBACK)
}
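// Illustrative usage (zkClient is assumed to be a connected node-zookeeper-client instance):
//   mkdirs(zkClient, "/app/locks/leader", false, null, false, (err, p) => { /* "/app/locks" now exists */ })
// With makeLastNode=false only the parent nodes are created; pass true to create the full path.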
/**<|fim▁hole|> * @private
* @param path the path
* @return the node
*/
export function getNodeFromPath(path: string): string {
validatePath(path)
const last = path.split(PATH_SEPARATOR).pop()
if (last === undefined)
throw new Error(`Error while validating ${path}, it should have been valid`)
return last
}
/**
* Given a parent path and a child node, create a combined full path
*
* @param parent the parent
* @param child the child
* @return full path
*/
export function makePath(parent: string, child: string): string {
return [parent.replace(/\/+$/, ""), child.replace(/^\/+/, "")].join("/")
}<|fim▁end|> | * Given a full path, return the node name. i.e. "/one/two/three" will return "three"
* |
<|file_name|>i586_unknown_linux_musl.rs<|end_file_name|><|fim▁begin|>use crate::spec::Target;
pub fn target() -> Target {
let mut base = super::i686_unknown_linux_musl::target();
base.cpu = "pentium".to_string();
base.llvm_target = "i586-unknown-linux-musl".to_string();<|fim▁hole|><|fim▁end|> | base
} |
<|file_name|>audio.js<|end_file_name|><|fim▁begin|>en.resources.define("audio",{
name: "Engine",
src: "./audio/ship_engine.ogg",
}, function(content, callback){
var sound = client.audio.createSound();
sound.load(content.src, function(sound){
content.sound = sound;
callback(content.type, content);
});
<|fim▁hole|><|fim▁end|> | }, function(content){
return content.sound;
}); |
<|file_name|>pydevd_frame.py<|end_file_name|><|fim▁begin|>import linecache
import os.path
import re
import sys
import traceback # @Reimport
from _pydev_bundle import pydev_log
from _pydevd_bundle import pydevd_dont_trace
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle.pydevd_breakpoints import get_exception_breakpoint
from _pydevd_bundle.pydevd_comm import CMD_STEP_CAUGHT_EXCEPTION, CMD_STEP_RETURN, CMD_STEP_OVER, CMD_SET_BREAK, \
CMD_STEP_INTO, CMD_SMART_STEP_INTO, CMD_RUN_TO_LINE, CMD_SET_NEXT_STATEMENT, CMD_STEP_INTO_MY_CODE
from _pydevd_bundle.pydevd_constants import STATE_SUSPEND, dict_contains, get_thread_id, STATE_RUN, dict_iter_values, IS_PY3K, \
dict_keys, dict_pop, RETURN_VALUES_DICT
from _pydevd_bundle.pydevd_dont_trace_files import DONT_TRACE, PYDEV_FILE
from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, just_raised
from _pydevd_bundle.pydevd_utils import get_clsname_for_code
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame
try:
from inspect import CO_GENERATOR
except:
CO_GENERATOR = 0
try:
from _pydevd_bundle.pydevd_signature import send_signature_call_trace
except ImportError:
def send_signature_call_trace(*args, **kwargs):
pass
basename = os.path.basename
IGNORE_EXCEPTION_TAG = re.compile('[^#]*#.*@IgnoreException')
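# Illustrative: a source line such as "raise ValueError()  # @IgnoreException" matches the
# pattern above, so exceptions raised from that line are skipped by the ignore machinery below.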
DEBUG_START = ('pydevd.py', 'run')
DEBUG_START_PY3K = ('_pydev_execfile.py', 'execfile')
TRACE_PROPERTY = 'pydevd_traceproperty.py'
get_file_type = DONT_TRACE.get
#=======================================================================================================================
# PyDBFrame
#=======================================================================================================================
class PyDBFrame: # No longer cdef because object was dying when only a reference to trace_dispatch was kept (need to check alternatives).
    '''This class handles the tracing for a given frame: trace_dispatch is used
    initially when we enter a new context ('call') and is then reused for the
    entire context.
'''
#Note: class (and not instance) attributes.
#Same thing in the main debugger but only considering the file contents, while the one in the main debugger
#considers the user input (so, the actual result must be a join of both).
filename_to_lines_where_exceptions_are_ignored = {}
filename_to_stat_info = {}
should_skip = -1
# IFDEF CYTHON
# def __init__(self, args):
# self._args = args # In the cython version we don't need to pass the frame
# ELSE
def __init__(self, args):
#args = main_debugger, filename, base, info, t, frame
#yeap, much faster than putting in self and then getting it from self later on
self._args = args[:-1] # Remove the frame (we don't want to have a reference to it).
# ENDIF
def set_suspend(self, *args, **kwargs):
self._args[0].set_suspend(*args, **kwargs)
def do_wait_suspend(self, *args, **kwargs):
self._args[0].do_wait_suspend(*args, **kwargs)
# IFDEF CYTHON
# def trace_exception(self, frame, str event, arg):
# cdef bint flag;
# ELSE
def trace_exception(self, frame, event, arg):
# ENDIF
if event == 'exception':
flag, frame = self.should_stop_on_exception(frame, event, arg)
if flag:
self.handle_exception(frame, event, arg)
return self.trace_dispatch
return self.trace_exception
# IFDEF CYTHON
# def should_stop_on_exception(self, frame, str event, arg):
# cdef PyDBAdditionalThreadInfo info;
# cdef bint flag;
# ELSE
def should_stop_on_exception(self, frame, event, arg):
# ENDIF
# main_debugger, _filename, info, _thread = self._args
main_debugger = self._args[0]
info = self._args[2]
flag = False
if info.pydev_state != STATE_SUSPEND: #and breakpoint is not None:
exception, value, trace = arg
if trace is not None: #on jython trace is None on the first event
exception_breakpoint = get_exception_breakpoint(
exception, main_debugger.break_on_caught_exceptions)
if exception_breakpoint is not None:
if exception_breakpoint.ignore_libraries:
if exception_breakpoint.notify_on_first_raise_only:
if main_debugger.first_appearance_in_scope(trace):
add_exception_to_frame(frame, (exception, value, trace))
try:
info.pydev_message = exception_breakpoint.qname
except:
info.pydev_message = exception_breakpoint.qname.encode('utf-8')
flag = True
else:
pydev_log.debug("Ignore exception %s in library %s" % (exception, frame.f_code.co_filename))
flag = False
else:
if not exception_breakpoint.notify_on_first_raise_only or just_raised(trace):
add_exception_to_frame(frame, (exception, value, trace))
try:
info.pydev_message = exception_breakpoint.qname
except:
info.pydev_message = exception_breakpoint.qname.encode('utf-8')
flag = True
else:
flag = False
else:
try:
if main_debugger.plugin is not None:
result = main_debugger.plugin.exception_break(main_debugger, self, frame, self._args, arg)
if result:
(flag, frame) = result
except:
flag = False
return flag, frame
def handle_exception(self, frame, event, arg):
try:
# print 'handle_exception', frame.f_lineno, frame.f_code.co_name
# We have 3 things in arg: exception type, description, traceback object
trace_obj = arg[2]
main_debugger = self._args[0]
if not hasattr(trace_obj, 'tb_next'):
return #Not always there on Jython...
initial_trace_obj = trace_obj
if trace_obj.tb_next is None and trace_obj.tb_frame is frame:
#I.e.: tb_next should be only None in the context it was thrown (trace_obj.tb_frame is frame is just a double check).
if main_debugger.break_on_exceptions_thrown_in_same_context:
#Option: Don't break if an exception is caught in the same function from which it is thrown
return
else:
#Get the trace_obj from where the exception was raised...
while trace_obj.tb_next is not None:
trace_obj = trace_obj.tb_next
if main_debugger.ignore_exceptions_thrown_in_lines_with_ignore_exception:
for check_trace_obj in (initial_trace_obj, trace_obj):
filename = get_abs_path_real_path_and_base_from_frame(check_trace_obj.tb_frame)[1]
filename_to_lines_where_exceptions_are_ignored = self.filename_to_lines_where_exceptions_are_ignored
lines_ignored = filename_to_lines_where_exceptions_are_ignored.get(filename)
if lines_ignored is None:
lines_ignored = filename_to_lines_where_exceptions_are_ignored[filename] = {}
try:
curr_stat = os.stat(filename)
curr_stat = (curr_stat.st_size, curr_stat.st_mtime)
except:
curr_stat = None
last_stat = self.filename_to_stat_info.get(filename)
if last_stat != curr_stat:
self.filename_to_stat_info[filename] = curr_stat
lines_ignored.clear()
try:
linecache.checkcache(filename)
except:
#Jython 2.1
linecache.checkcache()
from_user_input = main_debugger.filename_to_lines_where_exceptions_are_ignored.get(filename)
if from_user_input:
merged = {}
merged.update(lines_ignored)
#Override what we have with the related entries that the user entered
merged.update(from_user_input)
else:
merged = lines_ignored
exc_lineno = check_trace_obj.tb_lineno
# print ('lines ignored', lines_ignored)
# print ('user input', from_user_input)
# print ('merged', merged, 'curr', exc_lineno)
if not dict_contains(merged, exc_lineno): #Note: check on merged but update lines_ignored.
try:
line = linecache.getline(filename, exc_lineno, check_trace_obj.tb_frame.f_globals)
except:
#Jython 2.1
line = linecache.getline(filename, exc_lineno)
if IGNORE_EXCEPTION_TAG.match(line) is not None:
lines_ignored[exc_lineno] = 1
return
else:
#Put in the cache saying not to ignore
lines_ignored[exc_lineno] = 0
else:
#Ok, dict has it already cached, so, let's check it...
if merged.get(exc_lineno, 0):
return
thread = self._args[3]
try:
frame_id_to_frame = {}
frame_id_to_frame[id(frame)] = frame
f = trace_obj.tb_frame
while f is not None:
frame_id_to_frame[id(f)] = f
f = f.f_back
f = None
thread_id = get_thread_id(thread)
pydevd_vars.add_additional_frame_by_id(thread_id, frame_id_to_frame)
try:
main_debugger.send_caught_exception_stack(thread, arg, id(frame))
self.set_suspend(thread, CMD_STEP_CAUGHT_EXCEPTION)
self.do_wait_suspend(thread, frame, event, arg)
main_debugger.send_caught_exception_stack_proceeded(thread)
finally:
pydevd_vars.remove_additional_frame_by_id(thread_id)
except:
traceback.print_exc()
main_debugger.set_trace_for_frame_and_parents(frame)
finally:
#Clear some local variables...
trace_obj = None
initial_trace_obj = None
check_trace_obj = None
f = None
frame_id_to_frame = None
main_debugger = None
thread = None
def get_func_name(self, frame):
code_obj = frame.f_code
func_name = code_obj.co_name
try:
cls_name = get_clsname_for_code(code_obj, frame)
if cls_name is not None:
return "%s.%s" % (cls_name, func_name)
else:
return func_name
except:
traceback.print_exc()
return func_name
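    # Illustrative: for a frame executing a method Foo.bar, get_clsname_for_code recovers "Foo"
    # and the reported name becomes "Foo.bar"; plain functions fall back to the bare co_name.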
def manage_return_values(self, main_debugger, frame, event, arg):
try:
if main_debugger.show_return_values:
if event == "return" and hasattr(frame, "f_code") and hasattr(frame.f_code, "co_name"):
if hasattr(frame, "f_back") and hasattr(frame.f_back, "f_locals"):
return_values_dict = frame.f_back.f_locals.get(RETURN_VALUES_DICT, None)
if return_values_dict is None:
return_values_dict = {}
frame.f_back.f_locals[RETURN_VALUES_DICT] = return_values_dict
name = self.get_func_name(frame)
return_values_dict[name] = arg
if main_debugger.remove_return_values_flag:
# Showing return values was turned off, we should remove them from locals dict.
# The values can be in the current frame or in the back one
dict_pop(frame.f_locals, RETURN_VALUES_DICT, None)
if hasattr(frame, "f_back") and hasattr(frame.f_back, "f_locals"):
dict_pop(frame.f_back.f_locals, RETURN_VALUES_DICT, None)
main_debugger.remove_return_values_flag = False
except:
main_debugger.remove_return_values_flag = False
traceback.print_exc()
# IFDEF CYTHON
# def trace_dispatch(self, frame, str event, arg):
# cdef str filename;
# cdef bint is_exception_event;
# cdef bint has_exception_breakpoints;
# cdef bint can_skip;
# cdef PyDBAdditionalThreadInfo info;
# cdef int step_cmd;
# cdef int line;
# cdef str curr_func_name;
# cdef bint exist_result;
# ELSE
def trace_dispatch(self, frame, event, arg):
# ENDIF
main_debugger, filename, info, thread = self._args
try:
# print 'frame trace_dispatch', frame.f_lineno, frame.f_code.co_name, event
info.is_tracing = True
if main_debugger._finish_debugging_session:
return None
if event == 'call' and main_debugger.signature_factory:
send_signature_call_trace(main_debugger, frame, filename)
plugin_manager = main_debugger.plugin
is_exception_event = event == 'exception'
has_exception_breakpoints = main_debugger.break_on_caught_exceptions or main_debugger.has_plugin_exception_breaks
if is_exception_event:
if has_exception_breakpoints:
flag, frame = self.should_stop_on_exception(frame, event, arg)
if flag:
self.handle_exception(frame, event, arg)
return self.trace_dispatch
elif event not in ('line', 'call', 'return'):
                #I believe this can only happen in jython, at some boundaries between jython and java code, which we don't want to trace.
return None
stop_frame = info.pydev_step_stop
step_cmd = info.pydev_step_cmd
if is_exception_event:
breakpoints_for_file = None
if stop_frame and stop_frame is not frame and step_cmd == CMD_STEP_OVER and \
arg[0] in (StopIteration, GeneratorExit) and arg[2] is None:
info.pydev_step_cmd = CMD_STEP_INTO
info.pydev_step_stop = None
else:
# If we are in single step mode and something causes us to exit the current frame, we need to make sure we break
# eventually. Force the step mode to step into and the step stop frame to None.
# I.e.: F6 in the end of a function should stop in the next possible position (instead of forcing the user
# to make a step in or step over at that location).
# Note: this is especially troublesome when we're skipping code with the
# @DontTrace comment.
if stop_frame is frame and event == 'return' and step_cmd in (CMD_STEP_RETURN, CMD_STEP_OVER):
if not frame.f_code.co_flags & CO_GENERATOR:
info.pydev_step_cmd = CMD_STEP_INTO
info.pydev_step_stop = None
breakpoints_for_file = main_debugger.breakpoints.get(filename)
can_skip = False
if info.pydev_state == STATE_RUN:
#we can skip if:
#- we have no stop marked
#- we should make a step return/step over and we're not in the current frame
can_skip = (step_cmd == -1 and stop_frame is None)\
or (step_cmd in (CMD_STEP_RETURN, CMD_STEP_OVER) and stop_frame is not frame)
if can_skip and plugin_manager is not None and main_debugger.has_plugin_line_breaks:
can_skip = not plugin_manager.can_not_skip(main_debugger, self, frame)
if can_skip and main_debugger.show_return_values:
# trace function for showing return values after step over
if info.pydev_step_cmd == CMD_STEP_OVER and hasattr(frame, "f_back") and frame.f_back == info.pydev_step_stop:
can_skip = False
# Let's check to see if we are in a function that has a breakpoint. If we don't have a breakpoint,
# we will return nothing for the next trace
#also, after we hit a breakpoint and go to some other debugging state, we have to force the set trace anyway,
#so, that's why the additional checks are there.
if not breakpoints_for_file:
if can_skip:
if has_exception_breakpoints:
return self.trace_exception
else:
return None
else:
#checks the breakpoint to see if there is a context match in some function
curr_func_name = frame.f_code.co_name
#global context is set with an empty name
if curr_func_name in ('?', '<module>'):
curr_func_name = ''
for breakpoint in dict_iter_values(breakpoints_for_file): #jython does not support itervalues()
#will match either global or some function
if breakpoint.func_name in ('None', curr_func_name):
break
else: # if we had some break, it won't get here (so, that's a context that we want to skip)
if can_skip:
if has_exception_breakpoints:
return self.trace_exception
else:
return None
#We may have hit a breakpoint or we are already in step mode. Either way, let's check what we should do in this frame
#print 'NOT skipped', frame.f_lineno, frame.f_code.co_name, event
try:
line = frame.f_lineno
flag = False
#return is not taken into account for breakpoint hit because we'd have a double-hit in this case
#(one for the line and the other for the return).
stop_info = {}
breakpoint = None
exist_result = False
stop = False
bp_type = None
if not flag and event != 'return' and info.pydev_state != STATE_SUSPEND and breakpoints_for_file is not None \
and dict_contains(breakpoints_for_file, line):
breakpoint = breakpoints_for_file[line]
new_frame = frame
stop = True
if step_cmd == CMD_STEP_OVER and stop_frame is frame and event in ('line', 'return'):
stop = False #we don't stop on breakpoint if we have to stop by step-over (it will be processed later)
elif plugin_manager is not None and main_debugger.has_plugin_line_breaks:
result = plugin_manager.get_breakpoint(main_debugger, self, frame, event, self._args)
if result:
exist_result = True
(flag, breakpoint, new_frame, bp_type) = result
if breakpoint:
#ok, hit breakpoint, now, we have to discover if it is a conditional breakpoint
                    # let's do the conditional stuff here
if stop or exist_result:
condition = breakpoint.condition
if condition is not None:
try:
val = eval(condition, new_frame.f_globals, new_frame.f_locals)
if not val:
return self.trace_dispatch
except:
if type(condition) != type(''):
if hasattr(condition, 'encode'):
condition = condition.encode('utf-8')
msg = 'Error while evaluating expression: %s\n' % (condition,)
sys.stderr.write(msg)
traceback.print_exc()
if not main_debugger.suspend_on_breakpoint_exception:
return self.trace_dispatch
else:
stop = True
try:
# add exception_type and stacktrace into thread additional info
etype, value, tb = sys.exc_info()
try:
error = ''.join(traceback.format_exception_only(etype, value))
stack = traceback.extract_stack(f=tb.tb_frame.f_back)
# On self.set_suspend(thread, CMD_SET_BREAK) this info will be
# sent to the client.
info.conditional_breakpoint_exception = \
('Condition:\n' + condition + '\n\nError:\n' + error, stack)
finally:
etype, value, tb = None, None, None
except:
traceback.print_exc()
if breakpoint.expression is not None:
try:
try:
val = eval(breakpoint.expression, new_frame.f_globals, new_frame.f_locals)
except:
val = sys.exc_info()[1]
finally:
if val is not None:
info.pydev_message = str(val)
if not main_debugger.first_breakpoint_reached:
if event == 'call':
if hasattr(frame, 'f_back'):
back = frame.f_back
if back is not None:
# When we start debug session, we call execfile in pydevd run function. It produces an additional
# 'call' event for tracing and we stop on the first line of code twice.
_, back_filename, base = get_abs_path_real_path_and_base_from_frame(back)
if (base == DEBUG_START[0] and back.f_code.co_name == DEBUG_START[1]) or \
(base == DEBUG_START_PY3K[0] and back.f_code.co_name == DEBUG_START_PY3K[1]):
stop = False
main_debugger.first_breakpoint_reached = True
else:
# if the frame is traced after breakpoint stop,
# but the file should be ignored while stepping because of filters
if step_cmd != -1:
if main_debugger.is_filter_enabled and main_debugger.is_ignored_by_filters(filename):
# ignore files matching stepping filters
return self.trace_dispatch
if main_debugger.is_filter_libraries and main_debugger.not_in_scope(filename):
# ignore library files while stepping
return self.trace_dispatch
if main_debugger.show_return_values or main_debugger.remove_return_values_flag:
self.manage_return_values(main_debugger, frame, event, arg)
if stop:
self.set_suspend(thread, CMD_SET_BREAK)
if breakpoint and breakpoint.suspend_policy == "ALL":
main_debugger.suspend_all_other_threads(thread)
elif flag and plugin_manager is not None:
result = plugin_manager.suspend(main_debugger, thread, frame, bp_type)
if result:
frame = result
# if thread has a suspend flag, we suspend with a busy wait
if info.pydev_state == STATE_SUSPEND:
self.do_wait_suspend(thread, frame, event, arg)
return self.trace_dispatch
except:
traceback.print_exc()
raise
#step handling. We stop when we hit the right frame
try:
should_skip = 0
if pydevd_dont_trace.should_trace_hook is not None:
if self.should_skip == -1:
# I.e.: cache the result on self.should_skip (no need to evaluate the same frame multiple times).
# Note that on a code reload, we won't re-evaluate this because in practice, the frame.f_code
                    # which will be handled by this frame is read-only, so we can cache it safely.
if not pydevd_dont_trace.should_trace_hook(frame, filename):
# -1, 0, 1 to be Cython-friendly
should_skip = self.should_skip = 1
else:
should_skip = self.should_skip = 0
else:
should_skip = self.should_skip
plugin_stop = False
if should_skip:
stop = False
elif step_cmd == CMD_STEP_INTO:
stop = event in ('line', 'return')
if plugin_manager is not None:
result = plugin_manager.cmd_step_into(main_debugger, frame, event, self._args, stop_info, stop)
if result:
stop, plugin_stop = result<|fim▁hole|>
elif step_cmd == CMD_STEP_INTO_MY_CODE:
if not main_debugger.not_in_scope(frame.f_code.co_filename):
stop = event == 'line'
elif step_cmd == CMD_STEP_OVER:
stop = stop_frame is frame and event in ('line', 'return')
if frame.f_code.co_flags & CO_GENERATOR:
if event == 'return':
stop = False
if plugin_manager is not None:
result = plugin_manager.cmd_step_over(main_debugger, frame, event, self._args, stop_info, stop)
if result:
stop, plugin_stop = result
elif step_cmd == CMD_SMART_STEP_INTO:
stop = False
if info.pydev_smart_step_stop is frame:
info.pydev_func_name = '.invalid.' # Must match the type in cython
info.pydev_smart_step_stop = None
if event == 'line' or event == 'exception':
curr_func_name = frame.f_code.co_name
#global context is set with an empty name
if curr_func_name in ('?', '<module>') or curr_func_name is None:
curr_func_name = ''
if curr_func_name == info.pydev_func_name:
stop = True
elif step_cmd == CMD_STEP_RETURN:
stop = event == 'return' and stop_frame is frame
elif step_cmd == CMD_RUN_TO_LINE or step_cmd == CMD_SET_NEXT_STATEMENT:
stop = False
if event == 'line' or event == 'exception':
#Yes, we can only act on line events (weird hum?)
#Note: This code is duplicated at pydevd.py
#Acting on exception events after debugger breaks with exception
curr_func_name = frame.f_code.co_name
#global context is set with an empty name
if curr_func_name in ('?', '<module>'):
curr_func_name = ''
if curr_func_name == info.pydev_func_name:
line = info.pydev_next_line
if frame.f_lineno == line:
stop = True
else:
if frame.f_trace is None:
frame.f_trace = self.trace_dispatch
frame.f_lineno = line
frame.f_trace = None
stop = True
else:
stop = False
if stop and step_cmd != -1 and IS_PY3K:
# in Py3k we start script via our custom `execfile` function, and we shouldn't stop there
# while stepping when execution is finished
if event == 'return' and hasattr(frame, "f_back") and hasattr(frame.f_back, "f_code"):
back_filename = os.path.basename(frame.f_back.f_code.co_filename)
file_type = get_file_type(back_filename)
if file_type == PYDEV_FILE:
stop = False
if plugin_stop:
stopped_on_plugin = plugin_manager.stop(main_debugger, frame, event, self._args, stop_info, arg, step_cmd)
elif stop:
if event == 'line':
self.set_suspend(thread, step_cmd)
self.do_wait_suspend(thread, frame, event, arg)
else: #return event
back = frame.f_back
if back is not None:
#When we get to the pydevd run function, the debugging has actually finished for the main thread
#(note that it can still go on for other threads, but for this one, we just make it finish)
#So, just setting it to None should be OK
_, back_filename, base = get_abs_path_real_path_and_base_from_frame(back)
if base == DEBUG_START[0] and back.f_code.co_name == DEBUG_START[1]:
back = None
elif base == TRACE_PROPERTY:
                            # We don't want to trace the return event of pydevd_traceproperty (custom property for debugging)
#if we're in a return, we want it to appear to the user in the previous frame!
return None
elif pydevd_dont_trace.should_trace_hook is not None:
if not pydevd_dont_trace.should_trace_hook(back, back_filename):
# In this case, we'll have to skip the previous one because it shouldn't be traced.
# Also, we have to reset the tracing, because if the parent's parent (or some
# other parent) has to be traced and it's not currently, we wouldn't stop where
# we should anymore (so, a step in/over/return may not stop anywhere if no parent is traced).
# Related test: _debugger_case17a.py
main_debugger.set_trace_for_frame_and_parents(back, overwrite_prev_trace=True)
return None
if back is not None:
#if we're in a return, we want it to appear to the user in the previous frame!
self.set_suspend(thread, step_cmd)
self.do_wait_suspend(thread, back, event, arg)
else:
#in jython we may not have a back frame
info.pydev_step_stop = None
info.pydev_step_cmd = -1
info.pydev_state = STATE_RUN
except KeyboardInterrupt:
raise
except:
try:
traceback.print_exc()
info.pydev_step_cmd = -1
except:
return None
#if we are quitting, let's stop the tracing
retVal = None
if not main_debugger.quitting:
retVal = self.trace_dispatch
return retVal
finally:
info.is_tracing = False
#end trace_dispatch<|fim▁end|> | |
<|file_name|>test_som.py<|end_file_name|><|fim▁begin|>import os
import pytest
from som.vm.current import current_universe
@pytest.mark.parametrize(
"test_name",
[
"Array",<|fim▁hole|> "ClassLoading",
"ClassStructure",
"Closure",
"Coercion",
"CompilerReturn",
"DoesNotUnderstand",
"Double",
"Empty",
"Global",
"Hash",
"Integer",
"Preliminary",
"Reflection",
"SelfBlock",
"SpecialSelectors",
"Super",
"Set",
"String",
"Symbol",
"System",
"Vector",
],
)
def test_som(test_name):
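    """Run one SOM test suite through the TestHarness and require a clean exit code."""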
current_universe.reset(True)
core_lib_path = os.path.dirname(os.path.abspath(__file__)) + "/../core-lib/"
args = [
"-cp",
core_lib_path + "Smalltalk",
core_lib_path + "TestSuite/TestHarness.som",
test_name,
]
current_universe.interpret(args)
assert current_universe.last_exit_code() == 0<|fim▁end|> | "Block", |
<|file_name|>SingleLineTextField.tsx<|end_file_name|><|fim▁begin|>import * as React from 'react';
import { FormControl, FormGroup, ControlLabel, Col } from 'react-bootstrap';
import { IField, IFieldError, IFieldBuilderProps, IFieldInputProps } from '../../../../src/data';
interface IState {
}
export default class SingleLineTextField extends React.PureComponent<IFieldInputProps & IFieldBuilderProps, IState> {
public static defaultProps = {
value: ''
} as IFieldInputProps & IFieldBuilderProps
constructor(props: IFieldInputProps & IFieldBuilderProps) {
super(props);
this.onTextFieldChange = this.onTextFieldChange.bind(this);
this.validate = this.validate.bind(this);
}
public componentDidMount() {
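        // Report the initial value (and its validation result) to the parent as soon as the field mounts.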
const error = this.validate(this.props.value);
if (this.props.onValueChange) {
this.props.onValueChange(this.props.field, this.props.value, error);
}
}
public render() {
const { label } = this.props.field;
        const { hint, required } = this.props.field.options ? this.props.field.options : ({} as any); // fall back to an empty object so the destructuring cannot throw
return (
<div>
<FormGroup className='clearfix'>
<Col componentClass={ControlLabel} md={5}>{label}</Col>
<Col md={7}><|fim▁hole|> );
}
private onTextFieldChange(event: any) {
const value = event.target.value;
const error = this.validate(value);
this.props.onValueChange(this.props.field, value, error);
}
private validate(value: string): IFieldError {
if (this.props.field.options && this.props.field.options.required) {
if (value.trim() === '') {
return {
error: true,
errorMsg: "required"
};
}
}
return null;
}
}<|fim▁end|> | <FormControl type='text' placeholder={hint} required={required} value={this.props.value} onChange={this.onTextFieldChange} />
</Col>
</FormGroup>
</div> |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# @copyright (C) 2014-2015
# Developers: 'BARDOU AUGUSTIN - BREZILLON ANTOINE - EUZEN DAVID - FRANCOIS SEBASTIEN - JOUNEAU NICOLAS - KIBEYA AISHA - LE CONG SEBASTIEN -
# MAGREZ VALENTIN - NGASSAM NOUMI PAOLA JOVANY - OUHAMMOUCH SALMA - RIAND MORGAN - TREIMOLEIRO ALEX - TRULLA AURELIEN '
# @license https://www.gnu.org/licenses/gpl-3.0.html GPL version 3
from models import *
from django.contrib.auth.models import User as django_User
from datetime import datetime
from django import forms
from django.contrib.gis.geos import Point
class LoginForm(forms.ModelForm):
class Meta:
model = User
widgets = {
'mail': forms.EmailInput(attrs={'aria-invalid': 'true', 'pattern': 'email', 'required': 'required'}),
}
exclude = ['name', 'firstname', 'sex', 'city', 'zipCode', 'phone', 'idHomeAddress', 'idWorkAddress']
class EmailAuthBackend(object):
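    # Authentication backend that resolves accounts by e-mail address instead of username.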
def authenticate(self,username=None, password=None):
try:
user = django_User.objects.get(email=username)
if user and check_password(password, user.password):
return user
except django_User.DoesNotExist:
return None
def authenticate2(self,username=None, password=None):
try:
user = Provider.objects.filter(idUser__mail__contains=username).first()
if user and (check_password(password, user.password)):
return user
except User.DoesNotExist:
return None
def auth_email(self, username=None):
try:
user = Provider.objects.filter(idUser__mail__contains=username).first()
if user:
return user
except User.DoesNotExist:
return None
def auth_email2(self, username=None):
try:
user = django_User.objects.get(email=username)
if user:
return user
except User.DoesNotExist:
return None
class ContactForm(forms.Form):
firstname = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'required': 'required'}))
lastname = forms.CharField(max_length=100, widget=forms.TextInput(attrs={'required': 'required'}))
phone = forms.CharField(widget=forms.TextInput(
attrs={'maxlength': '10', 'aria-invalid': 'true', 'pattern': 'phone', 'required': 'required'}))
sender = forms.EmailField(widget=forms.EmailInput(attrs={'aria-invalid': 'false', 'pattern': 'email'}), required=False)
subjectCHOICES = (('Demandeur','Je cherche un trajet'),('Offreur','Je souhaite proposer un trajet'),
('Infos','Informations diverses'),('Autre','Autre'))
subject = forms.ChoiceField(choices=subjectCHOICES)
goalOfApplicationCHOICES = [('', '')] + list(MenusSettings.objects.filter(type="goalOfApplication").values_list('string', 'string'))
goalOfApplication = forms.ChoiceField(widget=forms.Select(attrs={'required':'required'}), choices=goalOfApplicationCHOICES, required=False)
yearOfBirthCHOICES = (tuple((str(n), str(n)) for n in range(1900, datetime.now().year - 15))+(('',''),))[::-1]
yearOfBirth = forms.ChoiceField(widget=forms.Select(attrs={'required':'required'}), choices=yearOfBirthCHOICES, required=False)
message = forms.CharField(widget=forms.Textarea(attrs={'required': 'required'}))
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
self.fields['goalOfApplication'].choices = get_menus_settings('goalOfApplication')
def get_menus_settings(type, required=True):
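    # Build the (value, label) choices for a MenusSettings-backed select; required fields get a leading empty option.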
if required:
return [('', '')] + list(MenusSettings.objects.filter(type=type).values_list('string', 'string'))
else:
return list(MenusSettings.objects.filter(type=type).values_list('string', 'string'))
class UserRegisterForm(forms.ModelForm):
class Meta:
model = User
widgets = {
'name': forms.TextInput(attrs={'required': 'required'}),
'firstname': forms.TextInput(attrs={'required': 'required'}),
'sex': forms.RadioSelect(attrs={'required': 'required'}),
'city': forms.TextInput(attrs={'required': 'required'}),
'zipCode': forms.TextInput(attrs={'maxlength': '5', 'aria-invalid': 'true', 'pattern': 'zipCode',
'required': 'required'}),
'mail': forms.EmailInput(attrs={'aria-invalid': 'true', 'pattern': 'email', 'required': 'required'}),
'phone': forms.TextInput(attrs={'maxlength': '10', 'aria-invalid': 'true',
'pattern': 'phone', 'required': 'required'}),
}
exclude = ['idHomeAddress', 'idWorkAddress']
class ProviderRegisterForm(forms.ModelForm):
class Meta:<|fim▁hole|> 'company': forms.TextInput(attrs={'list':'datalistCompany', 'autocomplete':'off'}),
'howKnowledge': forms.Select(attrs={'required':'required'}, choices=howKnowledgeCHOICES)
}
exclude = ['idUser', 'is_active', 'last_login']
def __init__(self, *args, **kwargs):
super(ProviderRegisterForm, self).__init__(*args, **kwargs)
self.fields['howKnowledge'].choices = get_menus_settings('howKnowledge')
class ProviderForm2(forms.ModelForm):
class Meta:
model = Provider
howKnowledgeCHOICES = [('','')] + list(MenusSettings.objects.filter(type="howKnowledge").values_list('string', 'string'))
widgets = {
'company': forms.TextInput(attrs={'list': 'datalistCompany', 'autocomplete': 'off'}),
'howKnowledge': forms.Select(attrs={'required': 'required'}, choices=howKnowledgeCHOICES)
}
exclude = ['idUser', 'is_active', 'last_login', 'password']
def __init__(self, *args, **kwargs):
super(ProviderForm2, self).__init__(*args, **kwargs)
self.fields['howKnowledge'].choices = get_menus_settings('howKnowledge')
class AddressRegisterForm(forms.ModelForm):
latlng = forms.CharField(widget=forms.HiddenInput(), required=False,)
cityHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
zipCodeHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
class Meta:
model = Address
widgets = {
'street':forms.TextInput(attrs={'class': 'field', 'placeholder': 'Indiquez un lieu',
'autocomplete': 'on', 'required': 'required'}),
}
exclude = ['idAddress', 'point', 'city', 'zipCode']
def clean(self):
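        # The hidden latlng field arrives as an "(x, y)" coordinate string; strip the parentheses
        # and rebuild it as a WKT POINT for the geometry field.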
cleaned_data = super(AddressRegisterForm, self).clean()
coord = cleaned_data['latlng'].replace('(', '')
city = cleaned_data['cityHide']
zipcode = cleaned_data['zipCodeHide']
if city == "":
city = "undefined"
if zipcode == "undefined" or zipcode == "":
zipcode = 0
if coord == "" or coord == "undefined":
raise forms.ValidationError("Bad address")
coord = coord.replace(')', '')
coordTab = coord.split(',')
cleaned_data['point'] = 'POINT(%f %f)' % (float(coordTab[0]), float(coordTab[1]))
cleaned_data['city'] = city
cleaned_data['zipCode'] = zipcode
return cleaned_data
class AddressRegisterFormWork(forms.ModelForm):
latlng = forms.CharField(widget=forms.HiddenInput(), required=False,)
cityHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
zipCodeHide = forms.CharField(widget=forms.HiddenInput(), required=False,)
class Meta:
model = Address
widgets = {
'street': forms.TextInput(attrs={'class': 'field', 'placeholder': 'Indiquez un lieu', 'autocomplete': 'on',
'required': 'required'}),
}
exclude = ['idAddress', 'point', 'city', 'zipCode']
def clean(self):
cleaned_data = super(AddressRegisterFormWork, self).clean()
coord = cleaned_data['latlng'].replace('(', '')
city = cleaned_data['cityHide']
zipcode = cleaned_data['zipCodeHide']
if city == "":
city = "undefined"
if zipcode == "undefined" or zipcode == "":
zipcode = 0
if coord == "" or coord == "undefined":
raise forms.ValidationError("Bad address")
coord = coord.replace(')', '')
coordtab = coord.split(',')
cleaned_data['point'] = 'POINT(%f %f)' % (float(coordtab[0]), float(coordtab[1]))
cleaned_data['city'] = city
cleaned_data['zipCode']= zipcode
return cleaned_data
class PathDepartureRegisterForm(forms.ModelForm):
class Meta:
model = Path
widgets = {
'type': forms.HiddenInput(),
'day': forms.HiddenInput(),
'weekNumber': forms.HiddenInput(),
'schedule': forms.TimeInput(attrs={'class': 'time', 'data-format': 'HH:mm', 'data-template': 'HH : mm',
'value': '08:00'}),
}
exclude = ['idPath', 'idProvider', 'departure', 'arrival', 'startingWeek']
class PathArrivalRegisterForm(forms.ModelForm):
class Meta:
model = Path
widgets = {
'type': forms.HiddenInput(),
'day': forms.HiddenInput(),
'weekNumber': forms.HiddenInput(),
'schedule': forms.TimeInput(attrs={'class': 'time', 'data-format': 'HH:mm', 'data-template': 'HH : mm',
'value':'18:00'}),
}
exclude = ['idPath', 'idProvider', 'departure', 'arrival', 'startingWeek']
class TestUserRegisterForm(forms.ModelForm):
class Meta:
model = User
widgets = {
'name': forms.TextInput(attrs={'required': 'required'}),
'firstname': forms.TextInput(attrs={'required': 'required'}),
'city': forms.TextInput(attrs={'required': 'required'}),
'zipCode': forms.TextInput(attrs={'maxlength': '5', 'aria-invalid': 'true', 'pattern': 'zipCode', 'required': 'required'}),
'mail': forms.EmailInput(attrs={'aria-invalid': 'true', 'pattern': 'email', 'required': 'required'}),
'phone': forms.TextInput(attrs={'maxlength': '10', 'aria-invalid': 'true', 'pattern': 'phone', 'required': 'required'}),
}
exclude = ['idHomeAddress', 'idWorkAddress', 'sex']
class newMdpForm(forms.Form):
oldmdp = forms.CharField(widget=forms.PasswordInput(), label='Ancien mot de passe', required=True)
newmdp1 = forms.CharField(widget=forms.PasswordInput(), label='Nouveau mot de passe', required=True)<|fim▁end|> | model = Provider
howKnowledgeCHOICES = get_menus_settings('howKnowledge')
widgets = {
'password': forms.PasswordInput(attrs={'id': 'password', 'required': 'required'}), |
<|file_name|>UpdatePasswordWidget.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2020 Emweb bv, Herent, Belgium.
*
* See the LICENSE file for terms of use.
*/
package eu.webtoolkit.jwt.auth;
import eu.webtoolkit.jwt.*;
import eu.webtoolkit.jwt.chart.*;
import eu.webtoolkit.jwt.servlet.*;
import eu.webtoolkit.jwt.utils.*;
import java.io.*;
import java.lang.ref.*;
import java.time.*;
import java.util.*;
import java.util.regex.*;
import javax.servlet.*;
import javax.servlet.http.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A widget which allows a user to choose a new password.
*
* <p>This widget lets a user choose a new password.
*
* <p>The widget renders the <code>"Wt.Auth.template.update-password"</code> template.
* Optionally, it asks for the current password, as well as a new password.
*
* <p>
*
* @see AuthWidget#createUpdatePasswordView(User user, boolean promptPassword)
*/
public class UpdatePasswordWidget extends WTemplateFormView {
private static Logger logger = LoggerFactory.getLogger(UpdatePasswordWidget.class);
/**
* Constructor.
*<|fim▁hole|> final User user,
RegistrationModel registrationModel,
final AuthModel authModel,
WContainerWidget parentContainer) {
super(tr("Wt.Auth.template.update-password"), (WContainerWidget) null);
this.user_ = user;
this.registrationModel_ = registrationModel;
this.authModel_ = authModel;
this.updated_ = new Signal();
this.canceled_ = new Signal();
this.registrationModel_.setValue(
RegistrationModel.LoginNameField, user.getIdentity(Identity.LoginName));
this.registrationModel_.setReadOnly(RegistrationModel.LoginNameField, true);
if (user.getPassword().isEmpty()) {
this.authModel_ = null;
} else {
if (this.authModel_ != null) {
this.authModel_.reset();
}
}
if (this.authModel_ != null && this.authModel_.getBaseAuth().isEmailVerificationEnabled()) {
this.registrationModel_.setValue(
RegistrationModel.EmailField, user.getEmail() + " " + user.getUnverifiedEmail());
}
this.registrationModel_.setVisible(RegistrationModel.EmailField, false);
WPushButton okButton = new WPushButton(tr("Wt.WMessageBox.Ok"));
this.bindWidget("ok-button", okButton);
WPushButton cancelButton = new WPushButton(tr("Wt.WMessageBox.Cancel"));
this.bindWidget("cancel-button", cancelButton);
if (this.authModel_ != null) {
this.authModel_.setValue(AuthModel.LoginNameField, user.getIdentity(Identity.LoginName));
this.updateViewField(this.authModel_, AuthModel.PasswordField);
this.authModel_.configureThrottling(okButton);
WLineEdit password = (WLineEdit) this.resolveWidget(AuthModel.PasswordField);
password.setFocus(true);
}
this.updateView(this.registrationModel_);
WLineEdit password = (WLineEdit) this.resolveWidget(RegistrationModel.ChoosePasswordField);
WLineEdit password2 = (WLineEdit) this.resolveWidget(RegistrationModel.RepeatPasswordField);
WText password2Info =
(WText) this.resolveWidget(RegistrationModel.RepeatPasswordField + "-info");
this.registrationModel_.validatePasswordsMatchJS(password, password2, password2Info);
if (!(this.authModel_ != null)) {
password.setFocus(true);
}
okButton
.clicked()
.addListener(
this,
(WMouseEvent e1) -> {
UpdatePasswordWidget.this.doUpdate();
});
cancelButton
.clicked()
.addListener(
this,
(WMouseEvent e1) -> {
UpdatePasswordWidget.this.cancel();
});
if (parentContainer != null) parentContainer.addWidget(this);
}
/**
* Constructor.
*
* <p>Calls {@link #UpdatePasswordWidget(User user, RegistrationModel registrationModel, AuthModel
* authModel, WContainerWidget parentContainer) this(user, registrationModel, authModel,
* (WContainerWidget)null)}
*/
public UpdatePasswordWidget(
final User user, RegistrationModel registrationModel, final AuthModel authModel) {
this(user, registrationModel, authModel, (WContainerWidget) null);
}
/** {@link Signal} emitted when the password was updated. */
public Signal updated() {
return this.updated_;
}
/** {@link Signal} emitted when cancel clicked. */
public Signal canceled() {
return this.canceled_;
}
protected WWidget createFormWidget(String field) {
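    // Builds the line edit for each model field; the password fields are wired up for on-the-fly validation.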
WFormWidget result = null;
if (field == RegistrationModel.LoginNameField) {
result = new WLineEdit();
} else {
if (field == AuthModel.PasswordField) {
WLineEdit p = new WLineEdit();
p.setEchoMode(EchoMode.Password);
result = p;
} else {
if (field == RegistrationModel.ChoosePasswordField) {
WLineEdit p = new WLineEdit();
p.setEchoMode(EchoMode.Password);
p.keyWentUp()
.addListener(
this,
(WKeyEvent e1) -> {
UpdatePasswordWidget.this.checkPassword();
});
p.changed()
.addListener(
this,
() -> {
UpdatePasswordWidget.this.checkPassword();
});
result = p;
} else {
if (field == RegistrationModel.RepeatPasswordField) {
WLineEdit p = new WLineEdit();
p.setEchoMode(EchoMode.Password);
p.changed()
.addListener(
this,
() -> {
UpdatePasswordWidget.this.checkPassword2();
});
result = p;
}
}
}
}
return result;
}
private User user_;
private RegistrationModel registrationModel_;
private AuthModel authModel_;
private Signal updated_;
private Signal canceled_;
private void checkPassword() {
this.updateModelField(this.registrationModel_, RegistrationModel.ChoosePasswordField);
this.registrationModel_.validateField(RegistrationModel.ChoosePasswordField);
this.updateViewField(this.registrationModel_, RegistrationModel.ChoosePasswordField);
}
private void checkPassword2() {
this.updateModelField(this.registrationModel_, RegistrationModel.RepeatPasswordField);
this.registrationModel_.validateField(RegistrationModel.RepeatPasswordField);
this.updateViewField(this.registrationModel_, RegistrationModel.RepeatPasswordField);
}
private boolean validate() {
boolean valid = true;
if (this.authModel_ != null) {
this.updateModelField(this.authModel_, AuthModel.PasswordField);
if (!this.authModel_.validate()) {
this.updateViewField(this.authModel_, AuthModel.PasswordField);
valid = false;
}
}
this.registrationModel_.validateField(RegistrationModel.LoginNameField);
this.checkPassword();
this.checkPassword2();
this.registrationModel_.validateField(RegistrationModel.EmailField);
if (!this.registrationModel_.isValid()) {
valid = false;
}
return valid;
}
private void doUpdate() {
if (this.validate()) {
String password = this.registrationModel_.valueText(RegistrationModel.ChoosePasswordField);
this.registrationModel_.getPasswordAuth().updatePassword(this.user_, password);
this.registrationModel_.getLogin().login(this.user_);
this.updated_.trigger();
}
}
private void cancel() {
this.canceled_.trigger();
}
}<|fim▁end|> | * <p>If <code>authModel</code> is not <code>null</code>, the user also has to authenticate first
* using his current password.
*/
public UpdatePasswordWidget( |
<|file_name|>delegate.js<|end_file_name|><|fim▁begin|>steal('can/util', 'can/observe', function(can) {
// ** - 'this' will be the deepest item changed
// * - 'this' will be any changes within *, but * will be the
// this returned
// tells if the parts part of a delegate matches the broken up props of the event
// gives the prop to use as 'this'
// - parts - the attribute name of the delegate split in parts ['foo','*']
// - props - the split props of the event that happened ['foo','bar','0']
// - returns - the attribute to delegate too ('foo.bar'), or null if not a match
var matches = function(parts, props){
//check props parts are the same or
var len = parts.length,
i =0,
// keeps the matched props we will use
matchedProps = [],
prop;
// if the event matches
for(i; i< len; i++){
prop = props[i]
// if no more props (but we should be matching them)
// return null
if( typeof prop !== 'string' ) {
return null;
} else
// if we have a "**", match everything
if( parts[i] == "**" ) {
return props.join(".");
} else
// a match, but we want to delegate to "*"
if (parts[i] == "*"){
// only do this if there is nothing after ...
matchedProps.push(prop);
}
else if( prop === parts[i] ) {
matchedProps.push(prop);
} else {
return null;
}
}
return matchedProps.join(".");
},
// gets a change event and tries to figure out which
// delegates to call
delegate = function(event, prop, how, newVal, oldVal){
// pre-split properties to save some regexp time
var props = prop.split("."),
delegates = (this._observe_delegates || []).slice(0),
delegate,
attr,
matchedAttr,
hasMatch,
valuesEqual;
event.attr = prop;
event.lastAttr = props[props.length -1 ];
// for each delegate
for(var i =0; delegate = delegates[i++];){
// if there is a batchNum, this means that this
// event is part of a series of events caused by a single
// attrs call. We don't want to issue the same event
// multiple times
// setting the batchNum happens later
if((event.batchNum && delegate.batchNum === event.batchNum) || delegate.undelegated ){
continue;
}
// reset match and values tests
hasMatch = undefined;
valuesEqual = true;
// for each attr in a delegate
for(var a =0 ; a < delegate.attrs.length; a++){
attr = delegate.attrs[a];
// check if it is a match
if(matchedAttr = matches(attr.parts, props)){
hasMatch = matchedAttr;
}
// if it has a value, make sure it's the right value
// if it's set, we should probably check that it has a
// value no matter what
if(attr.value && valuesEqual /* || delegate.hasValues */){
valuesEqual = attr.value === ""+this.attr(attr.attr)
} else if (valuesEqual && delegate.attrs.length > 1){
// if there are multiple attributes, each has to at
// least have some value
valuesEqual = this.attr(attr.attr) !== undefined
}
}
// if there is a match and valuesEqual ... call back
if(hasMatch && valuesEqual) {
// how to get to the changed property from the delegate
var from = prop.replace(hasMatch+".","");
// if this event is part of a batch, set it on the delegate
// to only send one event
if(event.batchNum ){
delegate.batchNum = event.batchNum
}
// if we listen to change, fire those with the same attrs
// TODO: the attrs should probably be using from
if( delegate.event === 'change' ){
arguments[1] = from;
event.curAttr = hasMatch;
delegate.callback.apply(this.attr(hasMatch), can.makeArray( arguments));
} else if(delegate.event === how ){
// if it's a match, callback with the location of the match
delegate.callback.apply(this.attr(hasMatch), [event,newVal, oldVal, from]);
} else if(delegate.event === 'set' &&
how == 'add' ) {
// if we are listening to set, we should also listen to add
delegate.callback.apply(this.attr(hasMatch), [event,newVal, oldVal, from]);
}
}
}
};
can.extend(can.Observe.prototype,{
/**
* @function can.Observe.prototype.delegate
* @parent can.Observe.delegate
* @plugin can/observe/delegate
*
         * `delegate( selector, event, handler(ev,newVal,oldVal,from) )` listens for changes
* in a child attribute from the parent. The child attribute
* does not have to exist.
*
*
* // create an observable
* var observe = can.Observe({
* foo : {
* bar : "Hello World"
* }
* })
*
* //listen to changes on a property
* observe.delegate("foo.bar","change", function(ev, prop, how, newVal, oldVal){
* // foo.bar has been added, set, or removed
* this //->
* });
*
* // change the property
* observe.attr('foo.bar',"Goodbye Cruel World")
*
* ## Types of events
*
* Delegate lets you listen to add, set, remove, and change events on property.
*
* __add__
*
* An add event is fired when a new property has been added.
*
* var o = new can.Control({});
* o.delegate("name","add", function(ev, value){
* // called once
* can.$('#name').show()
* })
* o.attr('name',"Justin")
* o.attr('name',"Brian");
*
* Listening to add events is useful for 'setup' functionality (in this case
* showing the <code>#name</code> element.
*
* __set__
*
* Set events are fired when a property takes on a new value. set events are
* always fired after an add.
*
* o.delegate("name","set", function(ev, value){
* // called twice
* can.$('#name').text(value)
* })
* o.attr('name',"Justin")
* o.attr('name',"Brian");
*
* __remove__
*
* Remove events are fired after a property is removed.
*
* o.delegate("name","remove", function(ev){
* // called once
* $('#name').text(value)
* })
* o.attr('name',"Justin");
* o.removeAttr('name');
*
* ## Wildcards - matching multiple properties
*
* Sometimes, you want to know when any property within some part
* of an observe has changed. Delegate lets you use wildcards to
* match any property name. The following listens for any change
* on an attribute of the params attribute:
*
* var o = can.Control({
* options : {
* limit : 100,
* offset: 0,
* params : {<|fim▁hole|> * parentId: 5
* }
* }
* })
* o.delegate('options.*','change', function(){
* alert('1');
* })
* o.delegate('options.**','change', function(){
* alert('2');
* })
*
* // alerts 1
* // alerts 2
* o.attr('options.offset',100)
*
* // alerts 2
* o.attr('options.params.parentId',6);
*
* Using a single wildcard (<code>*</code>) matches single level
* properties. Using a double wildcard (<code>**</code>) matches
* any deep property.
*
* ## Listening on multiple properties and values
*
* Delegate lets you listen on multiple values at once. The following listens
* for first and last name changes:
*
* var o = new can.Observe({
* name : {first: "Justin", last: "Meyer"}
* })
*
* o.bind("name.first,name.last",
* "set",
* function(ev,newVal,oldVal,from){
*
* })
*
* ## Listening when properties are a particular value
*
* Delegate lets you listen when a property is __set__ to a specific value:
*
* var o = new can.Observe({
* name : "Justin"
* })
*
* o.bind("name=Brian",
* "set",
* function(ev,newVal,oldVal,from){
*
* })
*
* @param {String} selector The attributes you want to listen for changes in.
*
* Selector should be the property or
* property names of the element you are searching. Examples:
*
* "name" - listens to the "name" property changing
* "name, address" - listens to "name" or "address" changing
* "name address" - listens to "name" or "address" changing
* "address.*" - listens to property directly in address
* "address.**" - listens to any property change in address
* "foo=bar" - listens when foo is "bar"
*
* @param {String} event The event name. One of ("set","add","remove","change")
* @param {Function} handler(ev,newVal,oldVal,prop) The callback handler
* called with:
*
* - newVal - the new value set on the observe
* - oldVal - the old value set on the observe
* - prop - the prop name that was changed
*
         * @return {can.Observe} the observe, for chaining
*/
delegate : function(selector, event, handler){
selector = can.trim(selector);
var delegates = this._observe_delegates || (this._observe_delegates = []),
attrs = [];
// split selector by spaces
selector.replace(/([^\s=]+)=?([^\s]+)?/g, function(whole, attr, value){
attrs.push({
// the attribute name
attr: attr,
// the attribute's pre-split names (for speed)
parts: attr.split('.'),
// the value associated with this prop
value: value
})
});
// delegates has pre-processed info about the event
delegates.push({
// the attrs name for unbinding
selector : selector,
// an object of attribute names and values {type: 'recipe',id: undefined}
// undefined means a value was not defined
attrs : attrs,
callback : handler,
event: event
});
if(delegates.length === 1){
this.bind("change",delegate)
}
return this;
},
/**
* @function can.Observe.prototype.undelegate
* @parent can.Observe.delegate
*
* `undelegate( selector, event, handler )` removes a delegated event handler from an observe.
*
* observe.undelegate("name","set", handler )
*
* @param {String} selector the attribute name of the object you want to undelegate from.
* @param {String} event the event name
* @param {Function} handler the callback handler
         * @return {can.Observe} the observe, for chaining
*/
undelegate : function(selector, event, handler){
selector = can.trim(selector);
var i =0,
delegates = this._observe_delegates || [],
delegateOb;
if(selector){
while(i < delegates.length){
delegateOb = delegates[i];
if( delegateOb.callback === handler ||
(!handler && delegateOb.selector === selector) ){
delegateOb.undelegated = true;
delegates.splice(i,1)
} else {
i++;
}
}
} else {
// remove all delegates
delegates = [];
}
if(!delegates.length){
//can.removeData(this, "_observe_delegates");
this.unbind("change",delegate)
}
return this;
}
});
// add helpers for testing ..
can.Observe.prototype.delegate.matches = matches;
return can.Observe;
})<|fim▁end|> | |
<|file_name|>SessionIdentifierGenerator.java<|end_file_name|><|fim▁begin|>package com.example.heregpsloc;
import java.security.SecureRandom;
import java.math.BigInteger;
// http://stackoverflow.com/questions/41107/how-to-generate-a-random-alpha-numeric-string
public final class SessionIdentifierGenerator {
private SecureRandom random = new SecureRandom();
public String nextSessionId() {
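        // 130 random bits rendered in base 32 yield a ~26 character URL-safe identifier.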
return new BigInteger(130, random).toString(32);
}<|fim▁hole|><|fim▁end|> | } |
<|file_name|>edit-app.js<|end_file_name|><|fim▁begin|>define(() => {
function postLoad(ipy, editor, savewidget, events) {
function navigateAlternate(alt) {
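      // Rewrite the current /edit URL to a sibling handler (e.g. /files) and request it as a download.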
var url = document.location.href.replace('/edit', alt);
if (url.includes("?")) {
url = url.slice(0, url.lastIndexOf("?"));
}
url = url + '?download=true';
if (!editor.clean) {
editor.save().then(function() {
window.open(url);
});
}
else {
window.open(url);
}
}
$('#saveButton').click(function() {
editor.save();
})
$('#renameButton').click(function() {
new savewidget.SaveWidget('span#save_widget', { editor: editor, events: events }).rename();
})
$('#downloadButton').click(function() {
navigateAlternate('/files');
})
}<|fim▁hole|> };
});<|fim▁end|> |
return {
postLoad: postLoad |
<|file_name|>page.config.js<|end_file_name|><|fim▁begin|>module.exports = {
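  // Design-token catalog: each category lists tokens with a value per theme ('plain' and 'funky').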
'tokens': [
{
'type': 'category',
'name': 'Colors',
'tokens': [
{
'variable': '--app-accent-color',
'name': 'App Accent Color',
'type': 'color',
'themes': {
'plain': {
'value': 'black',
'variable': '--plain-black-color'
},
'funky': {
'value': 'orange',
'variable': '--funky-orange-color'
}
},
'description': 'Accent color'
},
{
'variable': '--app-call-to-action-color',
'name': 'App Call To Action Color',
'type': 'color',
'themes': {
'plain': {
'value': 'red',
'variable': '--plain-red-color'
},
'funky': {
'value': 'orange',
'variable': '--funky-orange-color'
}
},
'description': 'Primary interaction color'
}
]
},
{
'type': 'category',
'name': 'Shadows',
'tokens': [
{
'variable': '--app-box-shadow',
'name': 'App Box Shadow',
'type': 'shadow',
'themes': {
'plain': {
'value': '0 0 8px 2px rgba(92,43,54,0.2)'
},
'funky': {
'value': '0 0 8px 2px rgba(0,0,0,0.2)'
}
}
}
]
},
{
'type': 'category',
'name': 'Borders',
'tokens': [
{
'variable': '--app-border-radius',
'name': 'App Border Radius',
'type': 'border-radius',
'themes': {
'plain': {
'value': '20rem'
},
'funky': {
'value': '3px'
}
}
},
{
'variable': '--app-border-width',
'name': 'App Border Width',
'type': 'border-width',
'themes': {
'plain': {
'value': '2px'
},
'funky': {
'value': '7px'
}
}
},
{
'variable': '--app-border-style',
'name': 'App Border Style',
'type': 'border-style',
'themes': {
'plain': {
'value': 'dashed'
},
'funky': {
'value': 'dotted'
}
}
}
]
},
{
'type': 'category',
'name': 'Opacity',
'tokens': [
{
'variable': '--app-opacity-30',
'name': 'Opacity 30%',
'type': 'opacity',
'themes': {
'plain': {
'value': '0.3'
},
'funky': {
'value': '0.3'
}
}
},
{
'variable': '--app-opacity-60',
'name': 'Opacity 60%',
'type': 'opacity',
'themes': {
'plain': {
'value': '0.6'
},
'funky': {
'value': '0.6'
}
}
},
{
'variable': '--app-opacity-90',
'name': 'Opacity 90%',
'type': 'opacity',
'themes': {
'plain': {
'value': '0.9'
},
'funky': {
'value': '0.9'
}
}
}
]
},
{
'type': 'category',
'name': 'Spaces',
'tokens': [
{
'variable': '--app-space-xs',
'name': 'App Space XS',
'type': 'size',
'themes': {
'plain': {
'value': '0.25rem'
},
'funky': {
'value': '0.25rem'
}
}
},
{
'variable': '--app-space-s',
'name': 'App Space S',
'type': 'size',
'themes': {
'plain': {
'value': '0.5rem'
},
'funky': {
'value': '0.5rem'
}
}
},
{
'variable': '--app-space-m',
'name': 'App Space M',
'type': 'size',<|fim▁hole|> 'funky': {
'value': '1rem'
}
}
},
{
'variable': '--app-space-l',
'name': 'App Space L',
'type': 'size',
'themes': {
'plain': {
'value': '1.5rem'
},
'funky': {
'value': '1.5rem'
}
}
},
{
'variable': '--app-space-xl',
'name': 'App Space XL',
'type': 'size',
'themes': {
'plain': {
'value': '2rem'
},
'funky': {
'value': '2rem'
}
}
},
{
'variable': '--app-space-xxl',
'name': 'App Space XXL',
'type': 'size',
'themes': {
'plain': {
'value': '4rem'
},
'funky': {
'value': '4rem'
}
}
}
]
}
]
}<|fim▁end|> | 'themes': {
'plain': {
'value': '1rem'
}, |
<|file_name|>DragAndDrop.controller.js<|end_file_name|><|fim▁begin|>sap.ui.define([
"sap/ui/core/mvc/Controller",
"sap/ui/model/json/JSONModel",
"sap/m/MessageToast"
], function (Controller, JSONModel, MessageToast) {
"use strict";
var TABLESETTINGS = window.TABLESETTINGS;
return Controller.extend("sap.ui.table.testApps.DragAndDrop", {
onInit: function () {
var oTable = this.byId("table");
var oTreeTable = this.byId("treetable");
var oModel = new JSONModel();
oModel.setData({
listData: TABLESETTINGS.listTestData,
treeData: TABLESETTINGS.treeTestData
});
oTable.setModel(oModel);
oTreeTable.setModel(oModel);
oTable.addExtension(new sap.m.Toolbar());
TABLESETTINGS.init(oTable, function(oButton) {
oTable.getExtension()[0].addContent(oButton);
});
oTreeTable.addExtension(new sap.m.Toolbar());
// TODO: Make table settings interoperable with multi table pages
//TABLESETTINGS.init(oTreeTable, function(oButton) {
// oTreeTable.getExtension()[0].addContent(oButton);
//});
window.oTable = oTable;
window.oTreeTable = oTreeTable;
},
tableDragStart: function(oEvent) {
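            // Rows are only draggable while their status state is "Success"; anything else cancels the drag.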
var oRow = oEvent.getParameter("target");
var oRowContext = oRow.getBindingContext();
var oModelProperty = oRowContext.getModel().getProperty(oRowContext.getPath());
var sStatus = oModelProperty && oModelProperty.objStatusState != null ? oModelProperty.objStatusState : "";
if (sStatus !== "Success") {
oEvent.preventDefault();
}
this.showDragStartEventInfo(oEvent, "Table");
},
tableReorderDragEnter: function(oEvent) {
var oRow = oEvent.getParameter("target");
var oRowContext = oRow.getBindingContext();
var oModelProperty = oRowContext.getModel().getProperty(oRowContext.getPath());
var sStatus = oModelProperty && oModelProperty.objStatusState != null ? oModelProperty.objStatusState : "";
if (sStatus !== "Success") {
oEvent.preventDefault();
}
this.showDragEnterEventInfo(oEvent, "Table Reorder");
},
tableReorderDrop: function(oEvent) {
this.showDropEventInfo(oEvent, "Table Reorder");
},
tableToTreeTableDragEnter: function(oEvent) {
this.showDragEnterEventInfo(oEvent, "Table to TreeTable");
},
tableToTreeTableDrop: function(oEvent) {
this.showDropEventInfo(oEvent, "Table to TreeTable");
},
treeTableDragStart: function(oEvent) {
this.showDragStartEventInfo(oEvent, "TreeTable");
},
treeTableReorderDragEnter: function(oEvent) {
this.showDragEnterEventInfo(oEvent, "TreeTable Reorder");
},
treeTableReorderDrop: function(oEvent) {
this.showDropEventInfo(oEvent, "TreeTable Reorder");
},
treeTableToTableDragEnter: function(oEvent) {
this.showDragEnterEventInfo(oEvent, "TreeTable to Table");
},
treeTableToTableDrop: function(oEvent) {
this.showDropEventInfo(oEvent, "TreeTable to Table");
},
showDragStartEventInfo: function(oEvent, sTitle) {
            MessageToast.show(
sTitle + " (" + "DragStart parameters" + ")"
+ "\nDrag target: " + oEvent.getParameter("target").getId()
+ "\nDrag session: " + (oEvent.getParameter("dragSession") ? "available" : "not available")
+ "\nBrowser event: " + oEvent.getParameter("browserEvent").type,
{
width: "25rem"
}
);
},
showDragEnterEventInfo: function(oEvent, sTitle) {
            MessageToast.show(
sTitle + " (" + "DragEnter parameters" + ")"
+ "\nDrop target: " + oEvent.getParameter("target").getId()
+ "\nDrag session: " + (oEvent.getParameter("dragSession") ? "available" : "not available")
+ "\nBrowser event: " + oEvent.getParameter("browserEvent").type,<|fim▁hole|> },
showDropEventInfo: function(oEvent, sTitle) {
            MessageToast.show(
sTitle + " (" + "Drop parameters" + ")"
+ "\nDragged control: " + oEvent.getParameter("draggedControl").getId()
+ "\nDropped control: " + oEvent.getParameter("droppedControl").getId()
+ "\nDrop position: " + oEvent.getParameter("dropPosition")
+ "\nDrag session: " + (oEvent.getParameter("dragSession") ? "available" : "not available")
+ "\nBrowser event: " + oEvent.getParameter("browserEvent").type,
{
duration: 8000,
width: "25rem"
}
);
},
getProgress: function(sValue) {
sValue = sValue || "";
return (sValue.length * 10) % 100;
},
getRating: function(sValue) {
sValue = sValue || "";
return sValue.length % 5;
}
});
});<|fim▁end|> | {
width: "25rem"
}
); |
<|file_name|>createPath.js<|end_file_name|><|fim▁begin|>VectorCanvas.prototype.createPath = function (config) {
var node;
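  // Two rendering back ends: a native SVG path when supported, otherwise a VML shape for legacy IE.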
if (this.mode === 'svg') {
node = this.createSvgNode('path');
node.setAttribute('d', config.path);
if (this.params.borderColor !== null) {
node.setAttribute('stroke', this.params.borderColor);
}
if (this.params.borderWidth > 0) {
node.setAttribute('stroke-width', this.params.borderWidth);
node.setAttribute('stroke-linecap', 'round');
node.setAttribute('stroke-linejoin', 'round');
}
if (this.params.borderOpacity > 0) {
node.setAttribute('stroke-opacity', this.params.borderOpacity);
}
node.setFill = function (color) {
this.setAttribute('fill', color);
if (this.getAttribute('original') === null) {
this.setAttribute('original', color);
}
};
node.getFill = function () {<|fim▁hole|> node.getOriginalFill = function () {
return this.getAttribute('original');
};
node.setOpacity = function (opacity) {
this.setAttribute('fill-opacity', opacity);
};
} else {
node = this.createVmlNode('shape');
node.coordorigin = '0 0';
node.coordsize = this.width + ' ' + this.height;
node.style.width = this.width + 'px';
node.style.height = this.height + 'px';
node.fillcolor = JQVMap.defaultFillColor;
node.stroked = false;
node.path = VectorCanvas.pathSvgToVml(config.path);
var scale = this.createVmlNode('skew');
scale.on = true;
scale.matrix = '0.01,0,0,0.01,0,0';
scale.offset = '0,0';
node.appendChild(scale);
var fill = this.createVmlNode('fill');
node.appendChild(fill);
node.setFill = function (color) {
this.getElementsByTagName('fill')[0].color = color;
if (this.getAttribute('original') === null) {
this.setAttribute('original', color);
}
};
node.getFill = function () {
return this.getElementsByTagName('fill')[0].color;
};
node.getOriginalFill = function () {
return this.getAttribute('original');
};
node.setOpacity = function (opacity) {
this.getElementsByTagName('fill')[0].opacity = parseInt(opacity * 100, 10) + '%';
};
}
return node;
};<|fim▁end|> | return this.getAttribute('fill');
};
|
<|file_name|>features_deviceinusefeature.py<|end_file_name|><|fim▁begin|># pylint: disable=line-too-long, unused-argument
import json
from django.template.loader import render_to_string
def format_reading(probe_name, json_payload):
item = json.loads(json_payload)
status = item['DEVICE_ACTIVE']
if item['DEVICE_ACTIVE'] is True:
status = "Active"
elif item['DEVICE_ACTIVE'] is False:
status = "Inactive"
return status
def visualize(probe_name, readings):
report = []<|fim▁hole|>
timestamp = payload['TIMESTAMP']
device_active = payload['DEVICE_ACTIVE']
if device_active is True:
device_active = 1
elif device_active is False:
device_active = 0
rep_dict = {}
rep_dict["y"] = device_active
rep_dict["x"] = timestamp
report.append(rep_dict)
return render_to_string('visualization_device.html', {'probe_name': probe_name, 'readings': readings, 'device_report': json.dumps(report)})<|fim▁end|> |
for reading in readings:
payload = json.loads(reading.payload) |
<|file_name|>test_memusage.py<|end_file_name|><|fim▁begin|>from test.lib.testing import eq_
from sqlalchemy.orm import mapper, relationship, create_session, \
clear_mappers, sessionmaker, class_mapper
from sqlalchemy.orm.mapper import _mapper_registry
from sqlalchemy.orm.session import _sessions
import operator
from test.lib import testing, engines
from sqlalchemy import MetaData, Integer, String, ForeignKey, \
PickleType, create_engine, Unicode
from test.lib.schema import Table, Column
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.processors import to_decimal_processor_factory, \
to_unicode_processor_factory
from test.lib.util import gc_collect
from sqlalchemy.util.compat import decimal
import gc
import weakref
from test.lib import fixtures
class A(fixtures.ComparableEntity):
pass
class B(fixtures.ComparableEntity):
pass
def profile_memory(func):
# run the test 50 times. if length of gc.get_objects()
# keeps growing, assert false
def profile(*args):
gc_collect()
samples = [0 for x in range(0, 50)]
for x in range(0, 50):
func(*args)
gc_collect()
samples[x] = len(gc.get_objects())
print "sample gc sizes:", samples
assert len(_sessions) == 0
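        # "flatline" below means the last five samples are identical, i.e. the object count stopped growing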
for x in samples[-4:]:
if x != samples[-5]:
flatline = False
break
else:
flatline = True
# object count is bigger than when it started
if not flatline and samples[-1] > samples[0]:
for x in samples[1:-2]:
# see if a spike bigger than the endpoint exists
if x > samples[-1]:
break
else:
assert False, repr(samples) + " " + repr(flatline)
return profile
def assert_no_mappers():
clear_mappers()
gc_collect()
assert len(_mapper_registry) == 0
class EnsureZeroed(fixtures.ORMTest):
def setup(self):
_sessions.clear()
_mapper_registry.clear()
class MemUsageTest(EnsureZeroed):
__requires__ = 'cpython',
# ensure a pure growing test trips the assertion
@testing.fails_if(lambda: True)
def test_fixture(self):
class Foo(object):
pass
x = []
@profile_memory
def go():
x[-1:] = [Foo(), Foo(), Foo(), Foo(), Foo(), Foo()]
go()
def test_session(self):
metadata = MetaData(testing.db)
table1 = Table("mytable", metadata,
Column('col1', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('col2', String(30)))
table2 = Table("mytable2", metadata,
Column('col1', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('col2', String(30)),
Column('col3', Integer, ForeignKey("mytable.col1")))
metadata.create_all()
m1 = mapper(A, table1, properties={
"bs":relationship(B, cascade="all, delete",
order_by=table2.c.col1)},
order_by=table1.c.col1)
m2 = mapper(B, table2)
m3 = mapper(A, table1, non_primary=True)
@profile_memory
def go():
sess = create_session()
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1,a2,a3]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")])
],
alist)
for a in alist:
sess.delete(a)
sess.flush()
go()
metadata.drop_all()
del m1, m2, m3
assert_no_mappers()
@testing.crashes('sqlite', ':memory: connection not suitable here')
def test_orm_many_engines(self):
metadata = MetaData(testing.db)
table1 = Table("mytable", metadata,
Column('col1', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('col2', String(30)))
table2 = Table("mytable2", metadata,
Column('col1', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('col2', String(30)),
Column('col3', Integer, ForeignKey("mytable.col1")))
metadata.create_all()
m1 = mapper(A, table1, properties={
"bs":relationship(B, cascade="all, delete",
order_by=table2.c.col1)},
order_by=table1.c.col1,
_compiled_cache_size=10
)
m2 = mapper(B, table2,
_compiled_cache_size=10
)
m3 = mapper(A, table1, non_primary=True)
@profile_memory
def go():
engine = engines.testing_engine(
options={'logging_name':'FOO',
'pool_logging_name':'BAR',
'use_reaper':False}
)
sess = create_session(bind=engine)
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1,a2,a3]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")])
],
alist)
for a in alist:
sess.delete(a)
sess.flush()
sess.close()
engine.dispose()
go()
metadata.drop_all()
del m1, m2, m3
assert_no_mappers()
def test_ad_hoc_types(self):
"""test storage of bind processors, result processors
in dialect-wide registry."""
from sqlalchemy.dialects import mysql, postgresql, sqlite
from sqlalchemy import types
eng = engines.testing_engine()
for args in (
(types.Integer, ),
(types.String, ),
(types.PickleType, ),
(types.Enum, 'a', 'b', 'c'),
(sqlite.DATETIME, ),
(postgresql.ENUM, 'a', 'b', 'c'),
(types.Interval, ),
(postgresql.INTERVAL, ),
(mysql.VARCHAR, ),
):
@profile_memory
def go():
type_ = args[0](*args[1:])
bp = type_._cached_bind_processor(eng.dialect)
rp = type_._cached_result_processor(eng.dialect, 0)
go()
assert not eng.dialect._type_memos
def test_many_updates(self):
metadata = MetaData(testing.db)
wide_table = Table('t', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
*[Column('col%d' % i, Integer) for i in range(10)]
)
class Wide(object):
pass
mapper(Wide, wide_table, _compiled_cache_size=10)
metadata.create_all()
session = create_session()
w1 = Wide()
session.add(w1)
session.flush()
session.close()
del session
counter = [1]
@profile_memory
def go():
session = create_session()
w1 = session.query(Wide).first()
x = counter[0]
dec = 10
while dec > 0:
# trying to count in binary here,
# works enough to trip the test case
if pow(2, dec) < x:
setattr(w1, 'col%d' % dec, counter[0])
x -= pow(2, dec)
dec -= 1
session.flush()
session.close()
counter[0] += 1
try:
go()
finally:
metadata.drop_all()
@testing.fails_if(lambda : testing.db.dialect.name == 'sqlite' \
and testing.db.dialect.dbapi.version_info >= (2,
5),
'Newer pysqlites generate warnings here too and '
'have similar issues.')
def test_unicode_warnings(self):
metadata = MetaData(testing.db)
table1 = Table('mytable', metadata, Column('col1', Integer,
primary_key=True,
test_needs_autoincrement=True), Column('col2',
Unicode(30)))
metadata.create_all()
i = [1]
@testing.emits_warning()
@profile_memory
def go():
# execute with a non-unicode object. a warning is emitted,
# this warning shouldn't clog up memory.
testing.db.execute(table1.select().where(table1.c.col2
== 'foo%d' % i[0]))
i[0] += 1
try:
go()
finally:
metadata.drop_all()
def test_mapper_reset(self):
metadata = MetaData(testing.db)
table1 = Table("mytable", metadata,
Column('col1', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('col2', String(30)))
table2 = Table("mytable2", metadata,
Column('col1', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('col2', String(30)),
Column('col3', Integer, ForeignKey("mytable.col1")))
@profile_memory
def go():
m1 = mapper(A, table1, properties={
"bs":relationship(B, order_by=table2.c.col1)
})
m2 = mapper(B, table2)
m3 = mapper(A, table1, non_primary=True)
sess = create_session()
a1 = A(col2="a1")
a2 = A(col2="a2")
a3 = A(col2="a3")
a1.bs.append(B(col2="b1"))
a1.bs.append(B(col2="b2"))
a3.bs.append(B(col2="b3"))
for x in [a1,a2,a3]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(col2="a1", bs=[B(col2="b1"), B(col2="b2")]),
A(col2="a2", bs=[]),
A(col2="a3", bs=[B(col2="b3")])
],
alist)
for a in alist:
sess.delete(a)
sess.flush()
sess.close()
clear_mappers()
metadata.create_all()
try:
go()
finally:
metadata.drop_all()
assert_no_mappers()
def test_with_inheritance(self):
metadata = MetaData(testing.db)
table1 = Table("mytable", metadata,
Column('col1', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('col2', String(30))
)
table2 = Table("mytable2", metadata,
Column('col1', Integer, ForeignKey('mytable.col1'),
primary_key=True, test_needs_autoincrement=True),
Column('col3', String(30)),
)
@profile_memory
def go():
class A(fixtures.ComparableEntity):
pass
class B(A):
pass
mapper(A, table1,
polymorphic_on=table1.c.col2,
polymorphic_identity='a')
mapper(B, table2,
inherits=A,
polymorphic_identity='b')
sess = create_session()
a1 = A()
a2 = A()
b1 = B(col3='b1')
b2 = B(col3='b2')
for x in [a1,a2,b1, b2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(), A(), B(col3='b1'), B(col3='b2')
],
alist)
for a in alist:
sess.delete(a)
sess.flush()
            # don't need to clear_mappers()
del B
del A
metadata.create_all()
try:
go()
finally:
metadata.drop_all()
assert_no_mappers()
def test_with_manytomany(self):
metadata = MetaData(testing.db)
table1 = Table("mytable", metadata,
Column('col1', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('col2', String(30))
)
table2 = Table("mytable2", metadata,
Column('col1', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('col2', String(30)),
)
table3 = Table('t1tot2', metadata,
Column('t1', Integer, ForeignKey('mytable.col1')),
Column('t2', Integer, ForeignKey('mytable2.col1')),
)
@profile_memory
def go():
class A(fixtures.ComparableEntity):
pass
class B(fixtures.ComparableEntity):
pass
mapper(A, table1, properties={
'bs':relationship(B, secondary=table3,
backref='as', order_by=table3.c.t1)
})
mapper(B, table2)
sess = create_session()
a1 = A(col2='a1')<|fim▁hole|> a1.bs.append(b1)
a2.bs.append(b2)
for x in [a1,a2]:
sess.add(x)
sess.flush()
sess.expunge_all()
alist = sess.query(A).order_by(A.col1).all()
eq_(
[
A(bs=[B(col2='b1')]), A(bs=[B(col2='b2')])
],
alist)
for a in alist:
sess.delete(a)
sess.flush()
# dont need to clear_mappers()
del B
del A
metadata.create_all()
try:
go()
finally:
metadata.drop_all()
assert_no_mappers()
@testing.fails_if(lambda : testing.db.dialect.name == 'sqlite' \
and testing.db.dialect.dbapi.version > '2.5')
@testing.provide_metadata
def test_key_fallback_result(self):
e = testing.db
m = self.metadata
t = Table('t', m, Column('x', Integer), Column('y', Integer))
m.create_all(e)
e.execute(t.insert(), {"x":1, "y":1})
@profile_memory
def go():
r = e.execute(t.alias().select())
for row in r:
row[t.c.x]
go()
    # fails on newer versions of pysqlite due to unusual memory behavior
    # in pysqlite itself. Background at:
# http://thread.gmane.org/gmane.comp.python.db.pysqlite.user/2290
@testing.fails_if(lambda : testing.db.dialect.name == 'sqlite' \
and testing.db.dialect.dbapi.version > '2.5')
def test_join_cache(self):
metadata = MetaData(testing.db)
table1 = Table('table1', metadata, Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True), Column('data',
String(30)))
table2 = Table('table2', metadata, Column('id', Integer,
primary_key=True,
test_needs_autoincrement=True), Column('data',
String(30)), Column('t1id', Integer,
ForeignKey('table1.id')))
class Foo(object):
pass
class Bar(object):
pass
mapper(Foo, table1, properties={'bars'
: relationship(mapper(Bar, table2))})
metadata.create_all()
session = sessionmaker()
@profile_memory
def go():
s = table2.select()
sess = session()
sess.query(Foo).join((s, Foo.bars)).all()
sess.rollback()
try:
go()
finally:
metadata.drop_all()
def test_mutable_identity(self):
metadata = MetaData(testing.db)
table1 = Table("mytable", metadata,
Column('col1', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('col2', PickleType(comparator=operator.eq, mutable=True))
)
class Foo(object):
def __init__(self, col2):
self.col2 = col2
mapper(Foo, table1)
metadata.create_all()
session = sessionmaker()()
def go():
obj = [
Foo({'a':1}),
Foo({'b':1}),
Foo({'c':1}),
Foo({'d':1}),
Foo({'e':1}),
Foo({'f':1}),
Foo({'g':1}),
Foo({'h':1}),
Foo({'i':1}),
Foo({'j':1}),
Foo({'k':1}),
Foo({'l':1}),
]
session.add_all(obj)
session.commit()
testing.eq_(len(session.identity_map._mutable_attrs), 12)
testing.eq_(len(session.identity_map), 12)
obj = None
gc_collect()
testing.eq_(len(session.identity_map._mutable_attrs), 0)
testing.eq_(len(session.identity_map), 0)
try:
go()
finally:
metadata.drop_all()
def test_type_compile(self):
from sqlalchemy.dialects.sqlite.base import dialect as SQLiteDialect
cast = sa.cast(column('x'), sa.Integer)
@profile_memory
def go():
dialect = SQLiteDialect()
cast.compile(dialect=dialect)
go()
@testing.requires.cextensions
def test_DecimalResultProcessor_init(self):
@profile_memory
def go():
to_decimal_processor_factory({}, 10)
go()
@testing.requires.cextensions
def test_DecimalResultProcessor_process(self):
@profile_memory
def go():
to_decimal_processor_factory(decimal.Decimal, 10)(1.2)
go()
@testing.requires.cextensions
def test_UnicodeResultProcessor_init(self):
@profile_memory
def go():
to_unicode_processor_factory('utf8')
go()<|fim▁end|> | a2 = A(col2='a2')
b1 = B(col2='b1')
b2 = B(col2='b2') |
<|file_name|>issue-85581.rs<|end_file_name|><|fim▁begin|>// Regression test of #85581.
// Checks that we do not suggest adding `;` when the second mutable borrow
// is within the scope of the first.
use std::collections::BinaryHeap;<|fim▁hole|>
fn foo(heap: &mut BinaryHeap<i32>) {
match heap.peek_mut() {
Some(_) => { heap.pop(); },
//~^ ERROR: cannot borrow `*heap` as mutable more than once at a time
None => (),
}
}
fn main() {}<|fim▁end|> | |
<|file_name|>test_steps__outputs_file.py<|end_file_name|><|fim▁begin|>from daisychain.steps.outputs.file import OutputFile
from daisychain.steps.input import InMemoryInput
import tempfile
import os
TEST_STRING = 'THIS OUTPUT STRING IS COMPLETELY UNIQUE AND WILL NOT EXIST EVER AGAIN'
def test_output_file():
t = tempfile.NamedTemporaryFile(dir=os.path.dirname(__file__), delete=False)<|fim▁hole|> assert i.pending
i.run()
assert i.finished
with open(t.name) as f:
assert TEST_STRING in f.read()
finally:
if os.path.exists(t.name):
os.unlink(t.name)
def test_output_failure():
i = OutputFile(path='/thisdirectoryreallydoesnotexist', input_step=InMemoryInput(output=TEST_STRING))
assert i.pending
try:
i.run()
except Exception as e:
pass
else:
assert False, "Trying to output to a directory that doesn't exist should fail"<|fim▁end|> | t.close()
try:
i = OutputFile(path=t.name, input_step=InMemoryInput(output=TEST_STRING)) |
<|file_name|>zone_sectag.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3.2
#
# Copyright (c) Net24 Limited, Christchurch, New Zealand 2011-2012
# and Voyager Internet Ltd, New Zealand, 2012-2013
#
# This file is part of py-magcode-core.
#
# Py-magcode-core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Py-magcode-core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with py-magcode-core. If not, see <http://www.gnu.org/licenses/>.
#
"""
Zone security tag class, corresponding to zone_sectags table
"""
from sqlalchemy.orm.exc import NoResultFound
from magcode.core.database import *
from dms.exceptions import ZoneSecTagExists
from dms.exceptions import ZoneSecTagDoesNotExist
from dms.exceptions import ZoneSecTagStillUsed
@saregister
class ZoneSecTag(object):
"""
    Zone security tag, corresponding to a row in the zone_sectags table.
Comparison methods are also used for sorting displayed output.
"""
_table="zone_sectags"
def __init__(self, sectag_label=None):
"""
        Initialise a zone security tag
"""
self.sectag = sectag_label
# For comparison purposes, including display!
def __eq__(self, other):
return self.sectag == other.sectag
def __ne__(self, other):
return self.sectag != other.sectag
def __lt__(self, other):
return self.sectag < other.sectag
def __gt__(self, other):
return self.sectag > other.sectag
def __le__(self, other):
return self.sectag <= other.sectag
def __ge__(self, other):
return self.sectag >= other.sectag
def __str__(self):
"""
Print out sectag name<|fim▁hole|> """
Output for zone engine.
"""
return {'zone_id': self.sectag, 'sectag_label': self.sectag}
def to_engine_brief(self, time_format=None):
"""
Brief output for zone_engine
"""
return {'sectag_label': self.sectag}
def new_sectag(db_session, sectag_label):
"""
Create a new sectag type
"""
if sectag_label == settings['admin_sectag']:
raise ZoneSecTagExists(sectag_label)
zone_sectag = ZoneSecTag(sectag_label)
try:
sectag_list = db_session.query(ZoneSecTag)\
.filter(ZoneSecTag.zone_id == None)\
.filter(ZoneSecTag.sectag == sectag_label).all()
if len(sectag_list):
raise ZoneSecTagExists(sectag_label)
except NoResultFound:
pass
db_session.add(zone_sectag)
db_session.flush()
return zone_sectag
def del_sectag(db_session, sectag_label):
"""
Delete a sectag label
"""
if sectag_label == settings['admin_sectag']:
raise ZoneSecTagStillUsed(sectag_label)
zone_sectag = ZoneSecTag(sectag_label)
try:
zone_sectag = db_session.query(ZoneSecTag)\
.filter(ZoneSecTag.zone_id == None)\
.filter(ZoneSecTag.sectag == sectag_label).one()
except NoResultFound:
raise ZoneSecTagDoesNotExist(sectag_label)
# Check that it is no longer being used.
try:
in_use_count = db_session.query(ZoneSecTag.sectag)\
.filter(ZoneSecTag.zone_id != None)\
.filter(ZoneSecTag.sectag == sectag_label).count()
if in_use_count:
raise ZoneSecTagStillUsed(sectag_label)
except NoResultFound:
pass
db_session.delete(zone_sectag)
db_session.flush()
del zone_sectag
def list_all_sectags(db_session):
"""
Return list of all sectags
"""
zone_sectags = [ZoneSecTag(settings['admin_sectag'])]
try:
zone_sectags.extend(db_session.query(ZoneSecTag)\
.filter(ZoneSecTag.zone_id == None).all())
except NoResultFound:
return zone_sectags
return zone_sectags
def list_all_sectag_labels(db_session):
"""
Return a list of all the sectag labels
"""
    zone_sectag_labels = [settings['admin_sectag']]
    # Initialise first so the extend below cannot raise NameError if the query raises NoResultFound.
    zone_sectag_label_list = []
    try:
zone_sectag_label_list = db_session.query(ZoneSecTag.sectag)\
.filter(ZoneSecTag.zone_id == None).all()
except NoResultFound:
pass
zone_sectag_labels.extend([x[0] for x in zone_sectag_label_list])
return zone_sectag_labels<|fim▁end|> | """
return str(self.sectag)
def to_engine(self, time_format=None): |
<|file_name|>component.js<|end_file_name|><|fim▁begin|>import Ember from 'ember';
export default Ember.Component.extend({
tagName: 'tr',
classNames: ['clickable'],
<|fim▁hole|> click() {
this.sendAction('action', this.imaging);
}
});<|fim▁end|> | |
<|file_name|>SnakePropertiesBuilder.java<|end_file_name|><|fim▁begin|><|fim▁hole|>public class SnakePropertiesBuilder extends MonsterProperties.Builder
{
@Override
public MonsterProperties build()
{
experienceGain(100)
.hp(100)
.strength(5)
.level(1);
return super.build();
}
}<|fim▁end|> | package pl.mmorpg.prototype.server.objects.monsters.properties.builders;
import pl.mmorpg.prototype.clientservercommon.packets.monsters.properties.MonsterProperties;
|
<|file_name|>manage.py<|end_file_name|><|fim▁begin|>#encoding:utf-8
from user import app
<|fim▁hole|><|fim▁end|> |
if __name__ == '__main__':
app.run(host='0.0.0.0',port=9002,debug=True) |
<|file_name|>output.js<|end_file_name|><|fim▁begin|><|fim▁hole|>var img = NotReact.createElement( 'img', { src: "foo.gif" });<|fim▁end|> | |
<|file_name|>pointing.mako.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% data.new_style_struct("Pointing", inherited=True, gecko_name="UserInterface") %>
<%helpers:longhand name="cursor" boxed="${product == 'gecko'}" animation_value_type="discrete"
spec="https://drafts.csswg.org/css-ui/#cursor">
pub use self::computed_value::T as SpecifiedValue;
#[cfg(feature = "gecko")]
use values::specified::url::SpecifiedUrl;
pub mod computed_value {
#[cfg(feature = "gecko")]
use std::fmt;
#[cfg(feature = "gecko")]
use style_traits::ToCss;
use style_traits::cursor::Cursor;
#[cfg(feature = "gecko")]
use values::specified::url::SpecifiedUrl;
#[cfg_attr(feature = "gecko", derive(MallocSizeOf))]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
#[derive(Clone, Copy, Debug, PartialEq, ToComputedValue, ToCss)]
pub enum Keyword {
Auto,
Cursor(Cursor),
}
#[cfg(not(feature = "gecko"))]
pub type T = Keyword;
#[cfg(feature = "gecko")]
#[derive(Clone, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
pub struct Image {
pub url: SpecifiedUrl,
pub hotspot: Option<(f32, f32)>,
}
#[cfg(feature = "gecko")]
#[derive(Clone, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
pub struct T {
pub images: Vec<Image>,
pub keyword: Keyword,
}
#[cfg(feature = "gecko")]
impl ToCss for Image {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
self.url.to_css(dest)?;
if let Some((x, y)) = self.hotspot {
dest.write_str(" ")?;
x.to_css(dest)?;
dest.write_str(" ")?;
y.to_css(dest)?;
}
Ok(())
}
}
#[cfg(feature = "gecko")]
impl ToCss for T {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
for url in &self.images {
url.to_css(dest)?;
dest.write_str(", ")?;
}
self.keyword.to_css(dest)
}
}
}
#[cfg(not(feature = "gecko"))]
#[inline]
pub fn get_initial_value() -> computed_value::T {
computed_value::Keyword::Auto
}
#[cfg(feature = "gecko")]
#[inline]
pub fn get_initial_value() -> computed_value::T {
computed_value::T {
images: vec![],
keyword: computed_value::Keyword::Auto
}
}
impl Parse for computed_value::Keyword {
fn parse<'i, 't>(_context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<computed_value::Keyword, ParseError<'i>> {
use std::ascii::AsciiExt;
use style_traits::cursor::Cursor;
let ident = input.expect_ident()?;
if ident.eq_ignore_ascii_case("auto") {
Ok(computed_value::Keyword::Auto)
} else {
Cursor::from_css_keyword(&ident)<|fim▁hole|> }
#[cfg(feature = "gecko")]
fn parse_image<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<computed_value::Image, ParseError<'i>> {
Ok(computed_value::Image {
url: SpecifiedUrl::parse(context, input)?,
hotspot: match input.try(|input| input.expect_number()) {
Ok(number) => Some((number, input.expect_number()?)),
Err(_) => None,
},
})
}
#[cfg(not(feature = "gecko"))]
pub fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<SpecifiedValue, ParseError<'i>> {
computed_value::Keyword::parse(context, input)
}
/// cursor: [<url> [<number> <number>]?]# [auto | default | ...]
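    // Illustrative example (not in the original source): this grammar accepts
    // declarations such as `cursor: url("hyper.cur") 2 2, pointer;`.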
#[cfg(feature = "gecko")]
pub fn parse<'i, 't>(context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<SpecifiedValue, ParseError<'i>> {
let mut images = vec![];
loop {
match input.try(|input| parse_image(context, input)) {
Ok(mut image) => {
image.url.build_image_value();
images.push(image)
}
Err(_) => break,
}
input.expect_comma()?;
}
Ok(computed_value::T {
images: images,
keyword: computed_value::Keyword::parse(context, input)?,
})
}
</%helpers:longhand>
// NB: `pointer-events: auto` (and use of `pointer-events` in anything that isn't SVG, in fact)
// is nonstandard, slated for CSS4-UI.
// TODO(pcwalton): SVG-only values.
${helpers.single_keyword("pointer-events", "auto none", animation_value_type="discrete",
extra_gecko_values="visiblepainted visiblefill visiblestroke visible painted fill stroke all",
flags="APPLIES_TO_PLACEHOLDER",
spec="https://www.w3.org/TR/SVG11/interact.html#PointerEventsProperty")}
${helpers.single_keyword("-moz-user-input", "auto none enabled disabled",
products="gecko", gecko_ffi_name="mUserInput",
gecko_enum_prefix="StyleUserInput",
animation_value_type="discrete",
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-user-input)")}
${helpers.single_keyword("-moz-user-modify", "read-only read-write write-only",
products="gecko", gecko_ffi_name="mUserModify",
gecko_enum_prefix="StyleUserModify",
needs_conversion=True,
animation_value_type="discrete",
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-user-modify)")}
${helpers.single_keyword("-moz-user-focus",
"none ignore normal select-after select-before select-menu select-same select-all",
products="gecko", gecko_ffi_name="mUserFocus",
gecko_enum_prefix="StyleUserFocus",
animation_value_type="discrete",
spec="Nonstandard (https://developer.mozilla.org/en-US/docs/Web/CSS/-moz-user-focus)")}
${helpers.predefined_type(
"caret-color",
"ColorOrAuto",
"Either::Second(Auto)",
spec="https://drafts.csswg.org/css-ui/#caret-color",
animation_value_type="Either<AnimatedColor, Auto>",
boxed=True,
ignored_when_colors_disabled=True,
products="gecko",
)}<|fim▁end|> | .map(computed_value::Keyword::Cursor)
.map_err(|()| SelectorParseError::UnexpectedIdent(ident.clone()).into())
}
} |
<|file_name|>gl.js<|end_file_name|><|fim▁begin|>//! moment.js locale configuration
//! locale : Galician [gl]
//! author : Juan G. Hurtado : https://github.com/juanghurtado
;(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined'
&& typeof require === 'function' ? factory(require('../moment')) :
typeof define === 'function' && define.amd ? define(['../moment'], factory) :
factory(global.moment)
}(this, function (moment) { 'use strict';
var gl = moment.defineLocale('gl', {
months : 'xaneiro_febreiro_marzo_abril_maio_xuño_xullo_agosto_setembro_outubro_novembro_decembro'.split('_'),
monthsShort : 'xan._feb._mar._abr._mai._xuñ._xul._ago._set._out._nov._dec.'.split('_'),
monthsParseExact: true,
weekdays : 'domingo_luns_martes_mércores_xoves_venres_sábado'.split('_'),
weekdaysShort : 'dom._lun._mar._mér._xov._ven._sáb.'.split('_'),
weekdaysMin : 'do_lu_ma_mé_xo_ve_sá'.split('_'),
weekdaysParseExact : true,
longDateFormat : {
LT : 'H:mm',
LTS : 'H:mm:ss',
L : 'DD/MM/YYYY',
LL : 'D [de] MMMM [de] YYYY',
LLL : 'D [de] MMMM [de] YYYY H:mm',
LLLL : 'dddd, D [de] MMMM [de] YYYY H:mm'
},
calendar : {
sameDay : function () {
return '[hoxe ' + ((this.hours() !== 1) ? 'ás' : 'á') + '] LT';
},
nextDay : function () {
return '[mañá ' + ((this.hours() !== 1) ? 'ás' : 'á') + '] LT';
},
nextWeek : function () {
return 'dddd [' + ((this.hours() !== 1) ? 'ás' : 'a') + '] LT';
},
lastDay : function () {
<|fim▁hole|> lastWeek : function () {
return '[o] dddd [pasado ' + ((this.hours() !== 1) ? 'ás' : 'a') + '] LT';
},
sameElse : 'L'
},
relativeTime : {
future : function (str) {
if (str.indexOf('un') === 0) {
return 'n' + str;
}
return 'en ' + str;
},
past : 'hai %s',
s : 'uns segundos',
m : 'un minuto',
mm : '%d minutos',
h : 'unha hora',
hh : '%d horas',
d : 'un día',
dd : '%d días',
M : 'un mes',
MM : '%d meses',
y : 'un ano',
yy : '%d anos'
},
ordinalParse : /\d{1,2}º/,
ordinal : '%dº',
week : {
dow : 1, // Monday is the first day of the week.
doy : 4 // The week that contains Jan 4th is the first week of the year.
}
});
return gl;
}));<|fim▁end|> | return '[onte ' + ((this.hours() !== 1) ? 'á' : 'a') + '] LT';
},
|
<|file_name|>manager.py<|end_file_name|><|fim▁begin|># Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
The Manager runs a series of tests (TestType interface) against a set
of test files. If a test file fails a TestType, it returns a list of TestFailure
objects to the Manager. The Manager then aggregates the TestFailures to
create a final report.
"""
import datetime
import json
import logging
import random
import sys
import time
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.tool import grammar
_log = logging.getLogger(__name__)
# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
TestExpectations = test_expectations.TestExpectations
class Manager(object):
"""A class for managing running a series of tests on a series of layout
test files."""
def __init__(self, port, options, printer):
"""Initialize test runner data structures.
Args:
          port: an object implementing port-specific functionality
options: a dictionary of command line options
printer: a Printer object to record updates to.
"""
self._port = port
self._filesystem = port.host.filesystem
self._options = options
self._printer = printer
self._expectations = None
self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
self.INSPECTOR_SUBDIR = 'inspector' + port.TEST_PATH_SEPARATOR
self.PERF_SUBDIR = 'perf'
self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
self.VIRTUAL_HTTP_SUBDIR = port.TEST_PATH_SEPARATOR.join([
'virtual', 'stable', 'http'])
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
self.ARCHIVED_RESULTS_LIMIT = 25
self._http_server_started = False
self._wptserve_started = False
self._websockets_server_started = False
self._results_directory = self._port.results_directory()
self._finder = LayoutTestFinder(self._port, self._options)
self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def _collect_tests(self, args):
return self._finder.find_tests(args, test_list=self._options.test_list,
fastest_percentile=self._options.fastest)
def _is_http_test(self, test):
return (
test.startswith(self.HTTP_SUBDIR) or
self._is_websocket_test(test) or
self.VIRTUAL_HTTP_SUBDIR in test
)
def _is_inspector_test(self, test):
return self.INSPECTOR_SUBDIR in test
def _is_websocket_test(self, test):
if self._port.is_wpt_enabled() and self._port.is_wpt_test(test):
return False
return self.WEBSOCKET_SUBDIR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
tests_to_run = [test for test in test_names if test not in tests_to_skip]
if not tests_to_run:
return tests_to_run, tests_to_skip
# Create a sorted list of test files so the subset chunk,
# if used, contains alphabetically consecutive tests.
if self._options.order == 'natural':
tests_to_run.sort(key=self._port.test_key)
elif self._options.order == 'random':
random.shuffle(tests_to_run)
elif self._options.order == 'random-seeded':
rnd = random.Random()
rnd.seed(4) # http://xkcd.com/221/
rnd.shuffle(tests_to_run)
tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
tests_to_skip.update(tests_in_other_chunks)
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file):
return TestInput(test_file,
self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
self._test_requires_lock(test_file),
should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))
def _test_requires_lock(self, test_file):
"""Return True if the test needs to be locked when
running multiple copies of NRWTs. Perf tests are locked
because heavy load caused by running other tests in parallel
        might cause some of them to time out."""
return self._is_http_test(test_file) or self._is_perf_test(test_file)
def _test_is_expected_missing(self, test_file):
expectations = self._expectations.model().get_expectations(test_file)
return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations
def _test_is_slow(self, test_file):
return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)
def needs_servers(self, test_names):
return any(self._test_requires_lock(test_name) for test_name in test_names)
def _rename_results_folder(self):
try:
timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime(self._filesystem.mtime(self._filesystem.join(self._results_directory, "results.html"))))
except (IOError, OSError), e:
            # It is possible that results.html was not generated in the previous run because the test
            # run was interrupted even before testing started. In those cases, don't archive the folder.
            # Simply overwrite the current folder contents with new results.
import errno
if e.errno == errno.EEXIST or e.errno == errno.ENOENT:
self._printer.write_update("No results.html file found in previous run, skipping it.")
return None
archived_name = ''.join((self._filesystem.basename(self._results_directory), "_", timestamp))
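        # e.g. "layout-test-results_2014-03-01-16-20-05" (illustrative value; the prefix
        # depends on the port's results directory name).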
archived_path = self._filesystem.join(self._filesystem.dirname(self._results_directory), archived_name)
self._filesystem.move(self._results_directory, archived_path)
def _delete_dirs(self, dir_list):
for dir in dir_list:
self._filesystem.rmtree(dir)
def _limit_archived_results_count(self):
results_directory_path = self._filesystem.dirname(self._results_directory)
file_list = self._filesystem.listdir(results_directory_path)
results_directories = []
for dir in file_list:
file_path = self._filesystem.join(results_directory_path, dir)
if self._filesystem.isdir(file_path) and self._results_directory in file_path:
results_directories.append(file_path)
results_directories.sort(key=lambda x: self._filesystem.mtime(x))
self._printer.write_update("Clobbering excess archived results in %s" % results_directory_path)
self._delete_dirs(results_directories[:-self.ARCHIVED_RESULTS_LIMIT])
def _set_up_run(self, test_names):
self._printer.write_update("Checking build ...")
if self._options.build:
exit_code = self._port.check_build(self.needs_servers(test_names), self._printer)
if exit_code:
_log.error("Build check failed")
return exit_code
# This must be started before we check the system dependencies,
# since the helper may do things to make the setup correct.
if self._options.pixel_tests:
self._printer.write_update("Starting pixel test helper ...")
self._port.start_helper()
# Check that the system dependencies (themes, fonts, ...) are correct.
if not self._options.nocheck_sys_deps:
self._printer.write_update("Checking system dependencies ...")
exit_code = self._port.check_sys_deps(self.needs_servers(test_names))
if exit_code:
self._port.stop_helper()
return exit_code
if self._options.clobber_old_results:
self._clobber_old_results()
elif self._filesystem.exists(self._results_directory):
self._limit_archived_results_count()
# Rename the existing results folder for archiving.
self._rename_results_folder()
# Create the output directory if it doesn't already exist.
self._port.host.filesystem.maybe_make_directory(self._results_directory)
self._port.setup_test_run()
return test_run_results.OK_EXIT_STATUS
def run(self, args):
"""Run the tests and return a RunDetails object with the results."""
start_time = time.time()
self._printer.write_update("Collecting tests ...")
running_all_tests = False
try:
paths, test_names, running_all_tests = self._collect_tests(args)
except IOError:
# This is raised if --test-list doesn't exist
return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
self._printer.write_update("Parsing expectations ...")
self._expectations = test_expectations.TestExpectations(self._port, test_names)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
# Check to make sure we're not skipping every test.
if not tests_to_run:
_log.critical('No tests to run.')
return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)
exit_code = self._set_up_run(tests_to_run)
if exit_code:
return test_run_results.RunDetails(exit_code=exit_code)
# Don't retry failures if an explicit list of tests was passed in.
if self._options.retry_failures is None:
should_retry_failures = len(paths) < len(test_names)
else:
should_retry_failures = self._options.retry_failures
enabled_pixel_tests_in_retry = False
try:
self._start_servers(tests_to_run)
num_workers = self._port.num_workers(int(self._options.child_processes))
initial_results = self._run_tests(
tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
num_workers)
# Don't retry failures when interrupted by user or failures limit exception.
should_retry_failures = should_retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
tests_to_retry = self._tests_to_retry(initial_results)
all_retry_results = []
if should_retry_failures and tests_to_retry:
enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
for retry_attempt in xrange(1, self._options.num_retries + 1):
if not tests_to_retry:
break
_log.info('')
_log.info('Retrying %s, attempt %d of %d...' %
(grammar.pluralize('unexpected failure', len(tests_to_retry)),
retry_attempt, self._options.num_retries))
retry_results = self._run_tests(tests_to_retry,
tests_to_skip=set(),
repeat_each=1,
iterations=1,
num_workers=num_workers,
retry_attempt=retry_attempt)
all_retry_results.append(retry_results)
tests_to_retry = self._tests_to_retry(retry_results)
if enabled_pixel_tests_in_retry:
self._options.pixel_tests = False
finally:
self._stop_servers()
self._clean_up_run()
# Some crash logs can take a long time to be written out so look
# for new logs after the test run finishes.
self._printer.write_update("looking for new crash logs")
self._look_for_new_crash_logs(initial_results, start_time)
for retry_attempt_results in all_retry_results:<|fim▁hole|> _log.debug("summarizing results")
summarized_full_results = test_run_results.summarize_results(
self._port, self._expectations, initial_results, all_retry_results,
enabled_pixel_tests_in_retry)
summarized_failing_results = test_run_results.summarize_results(
self._port, self._expectations, initial_results, all_retry_results,
enabled_pixel_tests_in_retry, only_include_failing=True)
exit_code = summarized_failing_results['num_regressions']
if exit_code > test_run_results.MAX_FAILURES_EXIT_STATUS:
_log.warning('num regressions (%d) exceeds max exit status (%d)' %
(exit_code, test_run_results.MAX_FAILURES_EXIT_STATUS))
exit_code = test_run_results.MAX_FAILURES_EXIT_STATUS
if not self._options.dry_run:
self._write_json_files(summarized_full_results, summarized_failing_results, initial_results, running_all_tests)
if self._options.write_full_results_to:
self._filesystem.copyfile(self._filesystem.join(self._results_directory, "full_results.json"),
self._options.write_full_results_to)
self._upload_json_files()
results_path = self._filesystem.join(self._results_directory, "results.html")
self._copy_results_html_file(results_path)
if initial_results.keyboard_interrupted:
exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
else:
if initial_results.interrupted:
exit_code = test_run_results.EARLY_EXIT_STATUS
if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
self._port.show_results_html_file(results_path)
self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
self._check_for_stale_w3c_dir()
return test_run_results.RunDetails(
exit_code, summarized_full_results, summarized_failing_results,
initial_results, all_retry_results, enabled_pixel_tests_in_retry)
def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations,
num_workers, retry_attempt=0):
test_inputs = []
for _ in xrange(iterations):
for test in tests_to_run:
for _ in xrange(repeat_each):
test_inputs.append(self._test_input_for_file(test))
return self._runner.run_tests(self._expectations, test_inputs,
tests_to_skip, num_workers, retry_attempt)
def _start_servers(self, tests_to_run):
if self._port.is_wpt_enabled() and any(self._port.is_wpt_test(test) for test in tests_to_run):
self._printer.write_update('Starting WPTServe ...')
self._port.start_wptserve()
self._wptserve_started = True
if self._port.requires_http_server() or any((self._is_http_test(test) or self._is_inspector_test(test)) for test in tests_to_run):
self._printer.write_update('Starting HTTP server ...')
self._port.start_http_server(additional_dirs={}, number_of_drivers=self._options.max_locked_shards)
self._http_server_started = True
if any(self._is_websocket_test(test) for test in tests_to_run):
self._printer.write_update('Starting WebSocket server ...')
self._port.start_websocket_server()
self._websockets_server_started = True
def _stop_servers(self):
if self._wptserve_started:
self._printer.write_update('Stopping WPTServe ...')
self._wptserve_started = False
self._port.stop_wptserve()
if self._http_server_started:
self._printer.write_update('Stopping HTTP server ...')
self._http_server_started = False
self._port.stop_http_server()
if self._websockets_server_started:
self._printer.write_update('Stopping WebSocket server ...')
self._websockets_server_started = False
self._port.stop_websocket_server()
def _clean_up_run(self):
_log.debug("Flushing stdout")
sys.stdout.flush()
_log.debug("Flushing stderr")
sys.stderr.flush()
_log.debug("Stopping helper")
self._port.stop_helper()
_log.debug("Cleaning up port")
self._port.clean_up_test_run()
def _check_for_stale_w3c_dir(self):
# TODO(dpranke): Remove this check after 1/1/2015 and let people deal with the warnings.
# Remove the check in port/base.py as well.
fs = self._port.host.filesystem
layout_tests_dir = self._port.layout_tests_dir()
if fs.isdir(fs.join(layout_tests_dir, 'w3c')):
_log.warning('WARNING: You still have the old LayoutTests/w3c directory in your checkout. You should delete it!')
def _force_pixel_tests_if_needed(self):
if self._options.pixel_tests:
return False
_log.debug("Restarting helper")
self._port.stop_helper()
self._options.pixel_tests = True
self._port.start_helper()
return True
def _look_for_new_crash_logs(self, run_results, start_time):
"""Since crash logs can take a long time to be written out if the system is
        under stress, do a second pass at the end of the test run.
run_results: the results of the test run
start_time: time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
for test, result in run_results.unexpected_results_by_name.iteritems():
if (result.type != test_expectations.CRASH):
continue
for failure in result.failures:
if not isinstance(failure, test_failures.FailureCrash):
continue
if failure.has_log:
continue
crashed_processes.append([test, failure.process_name, failure.pid])
sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
if sample_files:
for test, sample_file in sample_files.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.copy_sample_file(sample_file)
crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
if crash_logs:
for test, crash_log in crash_logs.iteritems():
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.write_crash_log(crash_log)
def _clobber_old_results(self):
dir_above_results_path = self._filesystem.dirname(self._results_directory)
self._printer.write_update("Clobbering old results in %s" % dir_above_results_path)
if not self._filesystem.exists(dir_above_results_path):
return
file_list = self._filesystem.listdir(dir_above_results_path)
results_directories = []
for dir in file_list:
file_path = self._filesystem.join(dir_above_results_path, dir)
if self._filesystem.isdir(file_path) and self._results_directory in file_path:
results_directories.append(file_path)
self._delete_dirs(results_directories)
# Port specific clean-up.
self._port.clobber_old_port_specific_results()
def _tests_to_retry(self, run_results):
# TODO(ojan): This should also check that result.type != test_expectations.MISSING since retrying missing expectations is silly.
# But that's a bit tricky since we only consider the last retry attempt for the count of unexpected regressions.
return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]
def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results, running_all_tests):
_log.debug("Writing JSON files in %s." % self._results_directory)
# FIXME: Upload stats.json to the server and delete times_ms.
times_trie = json_results_generator.test_timings_trie(initial_results.results_by_name.values())
times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
# Save out the times data so we can use it for --fastest in the future.
if running_all_tests:
bot_test_times_path = self._port.bot_test_times_path()
self._filesystem.maybe_make_directory(self._filesystem.dirname(bot_test_times_path))
json_results_generator.write_json(self._filesystem, times_trie, bot_test_times_path)
stats_trie = self._stats_trie(initial_results)
stats_path = self._filesystem.join(self._results_directory, "stats.json")
self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)
full_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
# We write failing_results.json out as jsonp because we need to load it from a file url for results.html and Chromium doesn't allow that.
json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS")
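        # With the callback argument the data is written as JSONP, roughly
        # ADD_RESULTS({...});  (payload shape shown for illustration only).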
_log.debug("Finished writing JSON files.")
def _upload_json_files(self):
if not self._options.test_results_server:
return
if not self._options.master_name:
_log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
return
_log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
attrs = [("builder", self._options.builder_name),
("testtype", "layout-tests"),
("master", self._options.master_name)]
files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]]
url = "http://%s/testfile/upload" % self._options.test_results_server
# Set uploading timeout in case appengine server is having problems.
# 120 seconds are more than enough to upload test results.
uploader = FileUploader(url, 120)
try:
response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
if response:
if response.code == 200:
_log.debug("JSON uploaded.")
else:
_log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
else:
_log.error("JSON upload failed; no response returned")
except Exception, err:
_log.error("Upload failed: %s" % err)
def _copy_results_html_file(self, destination_path):
base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
results_file = self._filesystem.join(base_dir, 'results.html')
# Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
# so make sure it exists before we try to copy it.
if self._filesystem.exists(results_file):
self._filesystem.copyfile(results_file, destination_path)
def _stats_trie(self, initial_results):
def _worker_number(worker_name):
return int(worker_name.split('/')[1]) if worker_name else -1
stats = {}
for result in initial_results.results_by_name.values():
if result.type != test_expectations.SKIP:
stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
stats_trie = {}
for name, value in stats.iteritems():
json_results_generator.add_path_to_trie(name, value, stats_trie)
return stats_trie<|fim▁end|> | self._look_for_new_crash_logs(retry_attempt_results, start_time)
|
<|file_name|>from.js<|end_file_name|><|fim▁begin|>'use strict';
require('../../modules/es.weak-set');
require('../../modules/esnext.weak-set.from');
var WeakSet = require('../../internals/path').WeakSet;
var weakSetfrom = WeakSet.from;<|fim▁hole|>};<|fim▁end|> |
module.exports = function from(source, mapFn, thisArg) {
return weakSetfrom.call(typeof this === 'function' ? this : WeakSet, source, mapFn, thisArg); |
<|file_name|>home.py<|end_file_name|><|fim▁begin|>"""
This page is in the table of contents.
Plugin to home the tool at beginning of each layer.
The home manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home
==Operation==
The default 'Activate Home' checkbox is on. When it is on, the functions described below will work; when it is off, nothing will be done.
==Settings==
===Name of Home File===
Default: home.gcode
At the beginning of each layer, home will add the commands of a gcode script with the name of the "Name of Home File" setting, if one exists. Home does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names. Home looks for those files in the alterations folder in the .skeinforge folder in the home directory. If it doesn't find the file, it then looks in the alterations folder in the skeinforge_plugins folder.
==Examples==
The following examples home the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and home.py.
> python home.py
This brings up the home dialog.
> python home.py Screw Holder Bottom.stl
The home tool is parsing the file:
Screw Holder Bottom.stl
..
The home tool has created the file:
.. Screw Holder Bottom_home.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to work around the Python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.vector3 import Vector3<|fim▁hole|>from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import os
import sys
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText( fileName, text, repository = None ):
"Home a gcode linear move file or text."
return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository)
def getCraftedTextFromText( gcodeText, repository = None ):
"Home a gcode linear move text."
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'home'):
return gcodeText
if repository == None:
repository = settings.getReadRepository( HomeRepository() )
if not repository.activateHome.value:
return gcodeText
return HomeSkein().getCraftedGcode(gcodeText, repository)
def getNewRepository():
'Get new repository.'
return HomeRepository()
def writeOutput(fileName, shouldAnalyze=True):
"Home a gcode linear move file. Chain home the gcode if it is not already homed."
skeinforge_craft.writeChainTextWithNounMessage(fileName, 'home', shouldAnalyze)
class HomeRepository:
"A class to handle the home settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.home.html', self)
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Home', self, '')
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home')
self.activateHome = settings.BooleanSetting().getFromValue('Activate Home', self, True )
self.nameOfHomeFile = settings.StringSetting().getFromValue('Name of Home File:', self, 'home.gcode')
self.executeTitle = 'Home'
def execute(self):
"Home button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class HomeSkein:
"A class to home a skein of extrusions."
def __init__(self):
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.extruderActive = False
self.highestZ = None
self.homeLines = []
self.layerCount = settings.LayerCount()
self.lineIndex = 0
self.lines = None
self.oldLocation = None
self.shouldHome = False
self.travelFeedRateMinute = 957.0
def addFloat( self, begin, end ):
"Add dive to the original height."
beginEndDistance = begin.distance(end)
alongWay = self.absolutePerimeterWidth / beginEndDistance
closeToEnd = euclidean.getIntermediateLocation( alongWay, end, begin )
closeToEnd.z = self.highestZ
self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, closeToEnd.dropAxis(), closeToEnd.z ) )
def addHomeTravel( self, splitLine ):
"Add the home travel gcode."
location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
self.highestZ = max( self.highestZ, location.z )
if not self.shouldHome:
return
self.shouldHome = False
if self.oldLocation == None:
return
if self.extruderActive:
self.distanceFeedRate.addLine('M103')
self.addHopUp( self.oldLocation )
self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.homeLines)
self.addHopUp( self.oldLocation )
self.addFloat( self.oldLocation, location )
if self.extruderActive:
self.distanceFeedRate.addLine('M101')
def addHopUp(self, location):
"Add hop to highest point."
locationUp = Vector3( location.x, location.y, self.highestZ )
self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, locationUp.dropAxis(), locationUp.z ) )
def getCraftedGcode( self, gcodeText, repository ):
"Parse gcode text and store the home gcode."
self.repository = repository
self.homeLines = settings.getAlterationFileLines(repository.nameOfHomeFile.value)
if len(self.homeLines) < 1:
return gcodeText
self.lines = archive.getTextLines(gcodeText)
self.parseInitialization( repository )
for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
line = self.lines[self.lineIndex]
self.parseLine(line)
return self.distanceFeedRate.output.getvalue()
def parseInitialization( self, repository ):
'Parse gcode initialization and store the parameters.'
for self.lineIndex in xrange(len(self.lines)):
line = self.lines[self.lineIndex]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
firstWord = gcodec.getFirstWord(splitLine)
self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
if firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addTagBracketedProcedure('home')
return
elif firstWord == '(<perimeterWidth>':
self.absolutePerimeterWidth = abs(float(splitLine[1]))
elif firstWord == '(<travelFeedRatePerSecond>':
self.travelFeedRateMinute = 60.0 * float(splitLine[1])
self.distanceFeedRate.addLine(line)
def parseLine(self, line):
"Parse a gcode line and add it to the bevel gcode."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
if len(splitLine) < 1:
return
firstWord = splitLine[0]
if firstWord == 'G1':
self.addHomeTravel(splitLine)
self.oldLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
elif firstWord == '(<layer>':
self.layerCount.printProgressIncrement('home')
if len(self.homeLines) > 0:
self.shouldHome = True
elif firstWord == 'M101':
self.extruderActive = True
elif firstWord == 'M103':
self.extruderActive = False
self.distanceFeedRate.addLine(line)
def main():
"Display the home dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()<|fim▁end|> | from fabmetheus_utilities import archive |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>import sys<|fim▁hole|>
__version__ = '2.0.5'
from cfchecker.cfchecks import getargs, CFChecker
def cfchecks_main():
"""cfchecks_main is based on the main program block in cfchecks.py
"""
(badc,coards,uploader,useFileName,standardName,areaTypes,udunitsDat,version,files)=getargs(sys.argv)
inst = CFChecker(uploader=uploader, useFileName=useFileName, badc=badc, coards=coards, cfStandardNamesXML=standardName, cfAreaTypesXML=areaTypes, udunitsDat=udunitsDat, version=version)
for file in files:
rc = inst.checker(file)
sys.exit (rc)<|fim▁end|> | import os
import os.path as op
|
<|file_name|>CouchbaseServer.py<|end_file_name|><|fim▁begin|>#
# Copyright (c) 2013, Centre National de la Recherche Scientifique
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import string
import os
import os.path
from random import choice
import stratuslab.system.SystemFactory as SystemFactory
from stratuslab.installator.Installator import Installator
from stratuslab import Util
from stratuslab.Util import printError
class CouchbaseServer(Installator):
@staticmethod
def _generate_password():
chars = string.letters + string.digits
length = 8
return ''.join([choice(chars) for _ in range(length)])
@staticmethod
def _cb_cmd(func, host, options):
opts = ' '.join(options)
cmd = '/opt/couchbase/bin/couchbase-cli %s -c %s:8091 %s' % (func, host, opts)
return cmd
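    # For illustration (host and option values assumed): _cb_cmd('node-init', '192.0.2.10',
    # ['--node-init-data-path=/opt/couchbase/var/lib/couchbase/data']) returns
    # '/opt/couchbase/bin/couchbase-cli node-init -c 192.0.2.10:8091 --node-init-data-path=/opt/couchbase/var/lib/couchbase/data'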
<|fim▁hole|> self._serviceName = 'couchbase-server'
self._packages = ['couchbase-server']
self._cb_cluster_username = 'admin'
self._cb_cluster_password = CouchbaseServer._generate_password()
self._cb_cluster_password_path = '/opt/couchbase/cluster-password.txt'
def _installFrontend(self):
self._installPackages()
def _setupFrontend(self):
if os.path.exists(self._cb_cluster_password_path):
Util.printStep('%s exists; skipping Couchbase configuration' % self._cb_cluster_password_path)
else:
self._configure()
def _startServicesFrontend(self):
self._restartService()
def _installPackages(self):
Util.printStep('Installing Couchbase packages')
self.system.installPackages(self._packages)
def _configure(self):
Util.printStep('(Re-)starting Couchbase')
cmd = 'service %s restart' % self._serviceName
self._executeExitOnError(cmd)
time.sleep(5)
Util.printStep('Set Couchbase data location')
options = ['--node-init-data-path=/opt/couchbase/var/lib/couchbase/data']
cmd = CouchbaseServer._cb_cmd('node-init', self.frontendIp, options)
self._executeExitOnError(cmd)
Util.printStep('Create default Couchbase bucket')
options = ['--bucket=default',
'--bucket-type=couchbase',
'--bucket-ramsize=400',
'--bucket-replica=1']
cmd = CouchbaseServer._cb_cmd('bucket-create', self.frontendIp, options)
self._executeExitOnError(cmd)
Util.printStep('Initialize Couchbase admin account')
options = ['--cluster-init-username=%s' % self._cb_cluster_username,
'--cluster-init-password=%s' % self._cb_cluster_password]
cmd = CouchbaseServer._cb_cmd('cluster-init', self.frontendIp, options)
self._executeExitOnError(cmd)
Util.printStep('Saving cluster password in %s' % self._cb_cluster_password_path)
with open(self._cb_cluster_password_path, 'w') as f:
f.write(self._cb_cluster_password + "\n")
Util.printStep('Reducing read access to password file')
os.chmod(self._cb_cluster_password_path, 0400)
def _restartService(self):
Util.printStep('Adding %s to chkconfig and restarting' % self._serviceName)
cmd = 'chkconfig --add %s' % self._serviceName
Util.execute(cmd.split(' '))
cmd = 'service %s restart' % self._serviceName
Util.execute(cmd.split(' '))
def _executeExitOnError(self, cmd_str):
rc, output = Util.execute(cmd_str.split(' '), withOutput=True, verboseLevel=self.verboseLevel,
verboseThreshold=Util.VERBOSE_LEVEL_DETAILED)
if rc != 0:
printError('Failed running: %s\n%s' % (cmd_str, output))<|fim▁end|> | def __init__(self, configHolder):
configHolder.assign(self)
self.system = SystemFactory.getSystem(self.frontendSystem, configHolder)
|
<|file_name|>enums.go<|end_file_name|><|fim▁begin|>package machinelearningservices
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// AllocationState enumerates the values for allocation state.
type AllocationState string
const (
// AllocationStateResizing ...
AllocationStateResizing AllocationState = "Resizing"
// AllocationStateSteady ...
AllocationStateSteady AllocationState = "Steady"
)
// PossibleAllocationStateValues returns an array of possible values for the AllocationState const type.
func PossibleAllocationStateValues() []AllocationState {
return []AllocationState{AllocationStateResizing, AllocationStateSteady}
}
// ApplicationSharingPolicy enumerates the values for application sharing policy.
type ApplicationSharingPolicy string
const (
// ApplicationSharingPolicyPersonal ...
ApplicationSharingPolicyPersonal ApplicationSharingPolicy = "Personal"
// ApplicationSharingPolicyShared ...
ApplicationSharingPolicyShared ApplicationSharingPolicy = "Shared"
)
// PossibleApplicationSharingPolicyValues returns an array of possible values for the ApplicationSharingPolicy const type.
func PossibleApplicationSharingPolicyValues() []ApplicationSharingPolicy {
return []ApplicationSharingPolicy{ApplicationSharingPolicyPersonal, ApplicationSharingPolicyShared}
}
// ClusterPurpose enumerates the values for cluster purpose.
type ClusterPurpose string
const (
// ClusterPurposeDenseProd ...
ClusterPurposeDenseProd ClusterPurpose = "DenseProd"
// ClusterPurposeDevTest ...
ClusterPurposeDevTest ClusterPurpose = "DevTest"
// ClusterPurposeFastProd ...
ClusterPurposeFastProd ClusterPurpose = "FastProd"
)
// PossibleClusterPurposeValues returns an array of possible values for the ClusterPurpose const type.
func PossibleClusterPurposeValues() []ClusterPurpose {
return []ClusterPurpose{ClusterPurposeDenseProd, ClusterPurposeDevTest, ClusterPurposeFastProd}
}
// ComputeInstanceAuthorizationType enumerates the values for compute instance authorization type.
type ComputeInstanceAuthorizationType string
const (
// ComputeInstanceAuthorizationTypePersonal ...
ComputeInstanceAuthorizationTypePersonal ComputeInstanceAuthorizationType = "personal"
)
// PossibleComputeInstanceAuthorizationTypeValues returns an array of possible values for the ComputeInstanceAuthorizationType const type.
func PossibleComputeInstanceAuthorizationTypeValues() []ComputeInstanceAuthorizationType {
return []ComputeInstanceAuthorizationType{ComputeInstanceAuthorizationTypePersonal}
}
// ComputeInstanceState enumerates the values for compute instance state.
type ComputeInstanceState string
const (
// ComputeInstanceStateCreateFailed ...
ComputeInstanceStateCreateFailed ComputeInstanceState = "CreateFailed"
// ComputeInstanceStateCreating ...
ComputeInstanceStateCreating ComputeInstanceState = "Creating"
// ComputeInstanceStateDeleting ...
ComputeInstanceStateDeleting ComputeInstanceState = "Deleting"
// ComputeInstanceStateJobRunning ...
ComputeInstanceStateJobRunning ComputeInstanceState = "JobRunning"
// ComputeInstanceStateRestarting ...
ComputeInstanceStateRestarting ComputeInstanceState = "Restarting"
// ComputeInstanceStateRunning ...
ComputeInstanceStateRunning ComputeInstanceState = "Running"
// ComputeInstanceStateSettingUp ...
ComputeInstanceStateSettingUp ComputeInstanceState = "SettingUp"
// ComputeInstanceStateSetupFailed ...
ComputeInstanceStateSetupFailed ComputeInstanceState = "SetupFailed"
// ComputeInstanceStateStarting ...
ComputeInstanceStateStarting ComputeInstanceState = "Starting"
// ComputeInstanceStateStopped ...
ComputeInstanceStateStopped ComputeInstanceState = "Stopped"
// ComputeInstanceStateStopping ...
ComputeInstanceStateStopping ComputeInstanceState = "Stopping"
// ComputeInstanceStateUnknown ...
ComputeInstanceStateUnknown ComputeInstanceState = "Unknown"
// ComputeInstanceStateUnusable ...
ComputeInstanceStateUnusable ComputeInstanceState = "Unusable"
// ComputeInstanceStateUserSettingUp ...
ComputeInstanceStateUserSettingUp ComputeInstanceState = "UserSettingUp"
// ComputeInstanceStateUserSetupFailed ...
ComputeInstanceStateUserSetupFailed ComputeInstanceState = "UserSetupFailed"
)
// PossibleComputeInstanceStateValues returns an array of possible values for the ComputeInstanceState const type.
func PossibleComputeInstanceStateValues() []ComputeInstanceState {
return []ComputeInstanceState{ComputeInstanceStateCreateFailed, ComputeInstanceStateCreating, ComputeInstanceStateDeleting, ComputeInstanceStateJobRunning, ComputeInstanceStateRestarting, ComputeInstanceStateRunning, ComputeInstanceStateSettingUp, ComputeInstanceStateSetupFailed, ComputeInstanceStateStarting, ComputeInstanceStateStopped, ComputeInstanceStateStopping, ComputeInstanceStateUnknown, ComputeInstanceStateUnusable, ComputeInstanceStateUserSettingUp, ComputeInstanceStateUserSetupFailed}
}
// ComputeType enumerates the values for compute type.
type ComputeType string
const (
// ComputeTypeAKS ...
ComputeTypeAKS ComputeType = "AKS"
// ComputeTypeAmlCompute ...
ComputeTypeAmlCompute ComputeType = "AmlCompute"
// ComputeTypeComputeInstance ...
ComputeTypeComputeInstance ComputeType = "ComputeInstance"
// ComputeTypeDatabricks ...
ComputeTypeDatabricks ComputeType = "Databricks"
// ComputeTypeDataFactory ...
ComputeTypeDataFactory ComputeType = "DataFactory"
// ComputeTypeDataLakeAnalytics ...
ComputeTypeDataLakeAnalytics ComputeType = "DataLakeAnalytics"
// ComputeTypeHDInsight ...
ComputeTypeHDInsight ComputeType = "HDInsight"
// ComputeTypeSynapseSpark ...
ComputeTypeSynapseSpark ComputeType = "SynapseSpark"
// ComputeTypeVirtualMachine ...
ComputeTypeVirtualMachine ComputeType = "VirtualMachine"
)
// PossibleComputeTypeValues returns an array of possible values for the ComputeType const type.
func PossibleComputeTypeValues() []ComputeType {
return []ComputeType{ComputeTypeAKS, ComputeTypeAmlCompute, ComputeTypeComputeInstance, ComputeTypeDatabricks, ComputeTypeDataFactory, ComputeTypeDataLakeAnalytics, ComputeTypeHDInsight, ComputeTypeSynapseSpark, ComputeTypeVirtualMachine}
}
// ComputeTypeBasicCompute enumerates the values for compute type basic compute.
type ComputeTypeBasicCompute string
const (
// ComputeTypeBasicComputeComputeTypeAKS ...
ComputeTypeBasicComputeComputeTypeAKS ComputeTypeBasicCompute = "AKS"
// ComputeTypeBasicComputeComputeTypeAmlCompute ...
ComputeTypeBasicComputeComputeTypeAmlCompute ComputeTypeBasicCompute = "AmlCompute"
// ComputeTypeBasicComputeComputeTypeCompute ...
ComputeTypeBasicComputeComputeTypeCompute ComputeTypeBasicCompute = "Compute"
// ComputeTypeBasicComputeComputeTypeComputeInstance ...
ComputeTypeBasicComputeComputeTypeComputeInstance ComputeTypeBasicCompute = "ComputeInstance"
// ComputeTypeBasicComputeComputeTypeDatabricks ...
ComputeTypeBasicComputeComputeTypeDatabricks ComputeTypeBasicCompute = "Databricks"
// ComputeTypeBasicComputeComputeTypeDataFactory ...
ComputeTypeBasicComputeComputeTypeDataFactory ComputeTypeBasicCompute = "DataFactory"
// ComputeTypeBasicComputeComputeTypeDataLakeAnalytics ...
ComputeTypeBasicComputeComputeTypeDataLakeAnalytics ComputeTypeBasicCompute = "DataLakeAnalytics"
// ComputeTypeBasicComputeComputeTypeHDInsight ...
ComputeTypeBasicComputeComputeTypeHDInsight ComputeTypeBasicCompute = "HDInsight"
// ComputeTypeBasicComputeComputeTypeVirtualMachine ...
ComputeTypeBasicComputeComputeTypeVirtualMachine ComputeTypeBasicCompute = "VirtualMachine"
)
// PossibleComputeTypeBasicComputeValues returns an array of possible values for the ComputeTypeBasicCompute const type.
func PossibleComputeTypeBasicComputeValues() []ComputeTypeBasicCompute {
return []ComputeTypeBasicCompute{ComputeTypeBasicComputeComputeTypeAKS, ComputeTypeBasicComputeComputeTypeAmlCompute, ComputeTypeBasicComputeComputeTypeCompute, ComputeTypeBasicComputeComputeTypeComputeInstance, ComputeTypeBasicComputeComputeTypeDatabricks, ComputeTypeBasicComputeComputeTypeDataFactory, ComputeTypeBasicComputeComputeTypeDataLakeAnalytics, ComputeTypeBasicComputeComputeTypeHDInsight, ComputeTypeBasicComputeComputeTypeVirtualMachine}
}
// ComputeTypeBasicComputeNodesInformation enumerates the values for compute type basic compute nodes
// information.
type ComputeTypeBasicComputeNodesInformation string
const (
// ComputeTypeBasicComputeNodesInformationComputeTypeAmlCompute ...
ComputeTypeBasicComputeNodesInformationComputeTypeAmlCompute ComputeTypeBasicComputeNodesInformation = "AmlCompute"
// ComputeTypeBasicComputeNodesInformationComputeTypeComputeNodesInformation ...
ComputeTypeBasicComputeNodesInformationComputeTypeComputeNodesInformation ComputeTypeBasicComputeNodesInformation = "ComputeNodesInformation"
)
// PossibleComputeTypeBasicComputeNodesInformationValues returns an array of possible values for the ComputeTypeBasicComputeNodesInformation const type.
func PossibleComputeTypeBasicComputeNodesInformationValues() []ComputeTypeBasicComputeNodesInformation {
return []ComputeTypeBasicComputeNodesInformation{ComputeTypeBasicComputeNodesInformationComputeTypeAmlCompute, ComputeTypeBasicComputeNodesInformationComputeTypeComputeNodesInformation}
}
// ComputeTypeBasicComputeSecrets enumerates the values for compute type basic compute secrets.
type ComputeTypeBasicComputeSecrets string
const (
// ComputeTypeBasicComputeSecretsComputeTypeAKS ...
ComputeTypeBasicComputeSecretsComputeTypeAKS ComputeTypeBasicComputeSecrets = "AKS"
// ComputeTypeBasicComputeSecretsComputeTypeComputeSecrets ...
ComputeTypeBasicComputeSecretsComputeTypeComputeSecrets ComputeTypeBasicComputeSecrets = "ComputeSecrets"
// ComputeTypeBasicComputeSecretsComputeTypeDatabricks ...
ComputeTypeBasicComputeSecretsComputeTypeDatabricks ComputeTypeBasicComputeSecrets = "Databricks"
// ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine ...
ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine ComputeTypeBasicComputeSecrets = "VirtualMachine"
)
// PossibleComputeTypeBasicComputeSecretsValues returns an array of possible values for the ComputeTypeBasicComputeSecrets const type.
func PossibleComputeTypeBasicComputeSecretsValues() []ComputeTypeBasicComputeSecrets {
return []ComputeTypeBasicComputeSecrets{ComputeTypeBasicComputeSecretsComputeTypeAKS, ComputeTypeBasicComputeSecretsComputeTypeComputeSecrets, ComputeTypeBasicComputeSecretsComputeTypeDatabricks, ComputeTypeBasicComputeSecretsComputeTypeVirtualMachine}
}
// ComputeTypeBasicCreateServiceRequest enumerates the values for compute type basic create service request.
type ComputeTypeBasicCreateServiceRequest string
const (
// ComputeTypeBasicCreateServiceRequestComputeTypeACI ...
ComputeTypeBasicCreateServiceRequestComputeTypeACI ComputeTypeBasicCreateServiceRequest = "ACI"
// ComputeTypeBasicCreateServiceRequestComputeTypeAKS ...
ComputeTypeBasicCreateServiceRequestComputeTypeAKS ComputeTypeBasicCreateServiceRequest = "AKS"
// ComputeTypeBasicCreateServiceRequestComputeTypeCreateServiceRequest ...
ComputeTypeBasicCreateServiceRequestComputeTypeCreateServiceRequest ComputeTypeBasicCreateServiceRequest = "CreateServiceRequest"
// ComputeTypeBasicCreateServiceRequestComputeTypeCustom ...
ComputeTypeBasicCreateServiceRequestComputeTypeCustom ComputeTypeBasicCreateServiceRequest = "Custom"
)
// PossibleComputeTypeBasicCreateServiceRequestValues returns an array of possible values for the ComputeTypeBasicCreateServiceRequest const type.
func PossibleComputeTypeBasicCreateServiceRequestValues() []ComputeTypeBasicCreateServiceRequest {
return []ComputeTypeBasicCreateServiceRequest{ComputeTypeBasicCreateServiceRequestComputeTypeACI, ComputeTypeBasicCreateServiceRequestComputeTypeAKS, ComputeTypeBasicCreateServiceRequestComputeTypeCreateServiceRequest, ComputeTypeBasicCreateServiceRequestComputeTypeCustom}
}
// ComputeTypeBasicServiceResponseBase enumerates the values for compute type basic service response base.
type ComputeTypeBasicServiceResponseBase string
const (
// ComputeTypeBasicServiceResponseBaseComputeTypeACI ...
ComputeTypeBasicServiceResponseBaseComputeTypeACI ComputeTypeBasicServiceResponseBase = "ACI"
// ComputeTypeBasicServiceResponseBaseComputeTypeAKS ...
ComputeTypeBasicServiceResponseBaseComputeTypeAKS ComputeTypeBasicServiceResponseBase = "AKS"
// ComputeTypeBasicServiceResponseBaseComputeTypeCustom ...
ComputeTypeBasicServiceResponseBaseComputeTypeCustom ComputeTypeBasicServiceResponseBase = "Custom"
// ComputeTypeBasicServiceResponseBaseComputeTypeServiceResponseBase ...
ComputeTypeBasicServiceResponseBaseComputeTypeServiceResponseBase ComputeTypeBasicServiceResponseBase = "ServiceResponseBase"
)
// PossibleComputeTypeBasicServiceResponseBaseValues returns an array of possible values for the ComputeTypeBasicServiceResponseBase const type.
func PossibleComputeTypeBasicServiceResponseBaseValues() []ComputeTypeBasicServiceResponseBase {
return []ComputeTypeBasicServiceResponseBase{ComputeTypeBasicServiceResponseBaseComputeTypeACI, ComputeTypeBasicServiceResponseBaseComputeTypeAKS, ComputeTypeBasicServiceResponseBaseComputeTypeCustom, ComputeTypeBasicServiceResponseBaseComputeTypeServiceResponseBase}
}
// DeploymentType enumerates the values for deployment type.
type DeploymentType string
const (
// DeploymentTypeBatch ...
DeploymentTypeBatch DeploymentType = "Batch"
// DeploymentTypeGRPCRealtimeEndpoint ...
DeploymentTypeGRPCRealtimeEndpoint DeploymentType = "GRPCRealtimeEndpoint"
// DeploymentTypeHTTPRealtimeEndpoint ...
DeploymentTypeHTTPRealtimeEndpoint DeploymentType = "HttpRealtimeEndpoint"
)
// PossibleDeploymentTypeValues returns an array of possible values for the DeploymentType const type.
func PossibleDeploymentTypeValues() []DeploymentType {
return []DeploymentType{DeploymentTypeBatch, DeploymentTypeGRPCRealtimeEndpoint, DeploymentTypeHTTPRealtimeEndpoint}
}
// EncryptionStatus enumerates the values for encryption status.
type EncryptionStatus string
const (
// EncryptionStatusDisabled ...
EncryptionStatusDisabled EncryptionStatus = "Disabled"
// EncryptionStatusEnabled ...
EncryptionStatusEnabled EncryptionStatus = "Enabled"
)
// PossibleEncryptionStatusValues returns an array of possible values for the EncryptionStatus const type.
func PossibleEncryptionStatusValues() []EncryptionStatus {
return []EncryptionStatus{EncryptionStatusDisabled, EncryptionStatusEnabled}
}
// IdentityType enumerates the values for identity type.
type IdentityType string
const (
// IdentityTypeApplication ...
IdentityTypeApplication IdentityType = "Application"
// IdentityTypeKey ...
IdentityTypeKey IdentityType = "Key"
// IdentityTypeManagedIdentity ...
IdentityTypeManagedIdentity IdentityType = "ManagedIdentity"
// IdentityTypeUser ...
IdentityTypeUser IdentityType = "User"
)
// PossibleIdentityTypeValues returns an array of possible values for the IdentityType const type.
func PossibleIdentityTypeValues() []IdentityType {
return []IdentityType{IdentityTypeApplication, IdentityTypeKey, IdentityTypeManagedIdentity, IdentityTypeUser}
}
// LoadBalancerType enumerates the values for load balancer type.
type LoadBalancerType string
const (
// LoadBalancerTypeInternalLoadBalancer ...
LoadBalancerTypeInternalLoadBalancer LoadBalancerType = "InternalLoadBalancer"
// LoadBalancerTypePublicIP ...
LoadBalancerTypePublicIP LoadBalancerType = "PublicIp"
)
// PossibleLoadBalancerTypeValues returns an array of possible values for the LoadBalancerType const type.
func PossibleLoadBalancerTypeValues() []LoadBalancerType {
return []LoadBalancerType{LoadBalancerTypeInternalLoadBalancer, LoadBalancerTypePublicIP}
}
// NodeState enumerates the values for node state.
type NodeState string
const (
// NodeStateIdle ...
NodeStateIdle NodeState = "idle"
// NodeStateLeaving ...
NodeStateLeaving NodeState = "leaving"
// NodeStatePreempted ...
NodeStatePreempted NodeState = "preempted"
// NodeStatePreparing ...
NodeStatePreparing NodeState = "preparing"
// NodeStateRunning ...
NodeStateRunning NodeState = "running"
// NodeStateUnusable ...
NodeStateUnusable NodeState = "unusable"
)
// PossibleNodeStateValues returns an array of possible values for the NodeState const type.
func PossibleNodeStateValues() []NodeState {
return []NodeState{NodeStateIdle, NodeStateLeaving, NodeStatePreempted, NodeStatePreparing, NodeStateRunning, NodeStateUnusable}
}
// OperationName enumerates the values for operation name.
type OperationName string
const (
// OperationNameCreate ...
OperationNameCreate OperationName = "Create"
// OperationNameDelete ...
OperationNameDelete OperationName = "Delete"
// OperationNameReimage ...
OperationNameReimage OperationName = "Reimage"
// OperationNameRestart ...
OperationNameRestart OperationName = "Restart"
// OperationNameStart ...
OperationNameStart OperationName = "Start"
// OperationNameStop ...
OperationNameStop OperationName = "Stop"
)
// PossibleOperationNameValues returns an array of possible values for the OperationName const type.
func PossibleOperationNameValues() []OperationName {
return []OperationName{OperationNameCreate, OperationNameDelete, OperationNameReimage, OperationNameRestart, OperationNameStart, OperationNameStop}
}
// OperationStatus enumerates the values for operation status.
type OperationStatus string
const (
// OperationStatusCreateFailed ...
OperationStatusCreateFailed OperationStatus = "CreateFailed"
// OperationStatusDeleteFailed ...
OperationStatusDeleteFailed OperationStatus = "DeleteFailed"
// OperationStatusInProgress ...
OperationStatusInProgress OperationStatus = "InProgress"
// OperationStatusReimageFailed ...
OperationStatusReimageFailed OperationStatus = "ReimageFailed"
// OperationStatusRestartFailed ...
OperationStatusRestartFailed OperationStatus = "RestartFailed"
// OperationStatusStartFailed ...
OperationStatusStartFailed OperationStatus = "StartFailed"
// OperationStatusStopFailed ...
OperationStatusStopFailed OperationStatus = "StopFailed"
// OperationStatusSucceeded ...
OperationStatusSucceeded OperationStatus = "Succeeded"
)
// PossibleOperationStatusValues returns an array of possible values for the OperationStatus const type.
func PossibleOperationStatusValues() []OperationStatus {
return []OperationStatus{OperationStatusCreateFailed, OperationStatusDeleteFailed, OperationStatusInProgress, OperationStatusReimageFailed, OperationStatusRestartFailed, OperationStatusStartFailed, OperationStatusStopFailed, OperationStatusSucceeded}
}
// OrderString enumerates the values for order string.
type OrderString string
const (
// OrderStringCreatedAtAsc ...
OrderStringCreatedAtAsc OrderString = "CreatedAtAsc"
// OrderStringCreatedAtDesc ...
OrderStringCreatedAtDesc OrderString = "CreatedAtDesc"
// OrderStringUpdatedAtAsc ...
OrderStringUpdatedAtAsc OrderString = "UpdatedAtAsc"
// OrderStringUpdatedAtDesc ...<|fim▁hole|>// PossibleOrderStringValues returns an array of possible values for the OrderString const type.
func PossibleOrderStringValues() []OrderString {
return []OrderString{OrderStringCreatedAtAsc, OrderStringCreatedAtDesc, OrderStringUpdatedAtAsc, OrderStringUpdatedAtDesc}
}
// OsType enumerates the values for os type.
type OsType string
const (
// OsTypeLinux ...
OsTypeLinux OsType = "Linux"
// OsTypeWindows ...
OsTypeWindows OsType = "Windows"
)
// PossibleOsTypeValues returns an array of possible values for the OsType const type.
func PossibleOsTypeValues() []OsType {
return []OsType{OsTypeLinux, OsTypeWindows}
}
// PrivateEndpointConnectionProvisioningState enumerates the values for private endpoint connection
// provisioning state.
type PrivateEndpointConnectionProvisioningState string
const (
// PrivateEndpointConnectionProvisioningStateCreating ...
PrivateEndpointConnectionProvisioningStateCreating PrivateEndpointConnectionProvisioningState = "Creating"
// PrivateEndpointConnectionProvisioningStateDeleting ...
PrivateEndpointConnectionProvisioningStateDeleting PrivateEndpointConnectionProvisioningState = "Deleting"
// PrivateEndpointConnectionProvisioningStateFailed ...
PrivateEndpointConnectionProvisioningStateFailed PrivateEndpointConnectionProvisioningState = "Failed"
// PrivateEndpointConnectionProvisioningStateSucceeded ...
PrivateEndpointConnectionProvisioningStateSucceeded PrivateEndpointConnectionProvisioningState = "Succeeded"
)
// PossiblePrivateEndpointConnectionProvisioningStateValues returns an array of possible values for the PrivateEndpointConnectionProvisioningState const type.
func PossiblePrivateEndpointConnectionProvisioningStateValues() []PrivateEndpointConnectionProvisioningState {
return []PrivateEndpointConnectionProvisioningState{PrivateEndpointConnectionProvisioningStateCreating, PrivateEndpointConnectionProvisioningStateDeleting, PrivateEndpointConnectionProvisioningStateFailed, PrivateEndpointConnectionProvisioningStateSucceeded}
}
// PrivateEndpointServiceConnectionStatus enumerates the values for private endpoint service connection status.
type PrivateEndpointServiceConnectionStatus string
const (
// PrivateEndpointServiceConnectionStatusApproved ...
PrivateEndpointServiceConnectionStatusApproved PrivateEndpointServiceConnectionStatus = "Approved"
// PrivateEndpointServiceConnectionStatusDisconnected ...
PrivateEndpointServiceConnectionStatusDisconnected PrivateEndpointServiceConnectionStatus = "Disconnected"
// PrivateEndpointServiceConnectionStatusPending ...
PrivateEndpointServiceConnectionStatusPending PrivateEndpointServiceConnectionStatus = "Pending"
// PrivateEndpointServiceConnectionStatusRejected ...
PrivateEndpointServiceConnectionStatusRejected PrivateEndpointServiceConnectionStatus = "Rejected"
// PrivateEndpointServiceConnectionStatusTimeout ...
PrivateEndpointServiceConnectionStatusTimeout PrivateEndpointServiceConnectionStatus = "Timeout"
)
// PossiblePrivateEndpointServiceConnectionStatusValues returns an array of possible values for the PrivateEndpointServiceConnectionStatus const type.
func PossiblePrivateEndpointServiceConnectionStatusValues() []PrivateEndpointServiceConnectionStatus {
return []PrivateEndpointServiceConnectionStatus{PrivateEndpointServiceConnectionStatusApproved, PrivateEndpointServiceConnectionStatusDisconnected, PrivateEndpointServiceConnectionStatusPending, PrivateEndpointServiceConnectionStatusRejected, PrivateEndpointServiceConnectionStatusTimeout}
}
// ProvisioningState enumerates the values for provisioning state.
type ProvisioningState string
const (
// ProvisioningStateCanceled ...
ProvisioningStateCanceled ProvisioningState = "Canceled"
// ProvisioningStateCreating ...
ProvisioningStateCreating ProvisioningState = "Creating"
// ProvisioningStateDeleting ...
ProvisioningStateDeleting ProvisioningState = "Deleting"
// ProvisioningStateFailed ...
ProvisioningStateFailed ProvisioningState = "Failed"
// ProvisioningStateSucceeded ...
ProvisioningStateSucceeded ProvisioningState = "Succeeded"
// ProvisioningStateUnknown ...
ProvisioningStateUnknown ProvisioningState = "Unknown"
// ProvisioningStateUpdating ...
ProvisioningStateUpdating ProvisioningState = "Updating"
)
// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type.
func PossibleProvisioningStateValues() []ProvisioningState {
return []ProvisioningState{ProvisioningStateCanceled, ProvisioningStateCreating, ProvisioningStateDeleting, ProvisioningStateFailed, ProvisioningStateSucceeded, ProvisioningStateUnknown, ProvisioningStateUpdating}
}
// QuotaUnit enumerates the values for quota unit.
type QuotaUnit string
const (
// QuotaUnitCount ...
QuotaUnitCount QuotaUnit = "Count"
)
// PossibleQuotaUnitValues returns an array of possible values for the QuotaUnit const type.
func PossibleQuotaUnitValues() []QuotaUnit {
return []QuotaUnit{QuotaUnitCount}
}
// ReasonCode enumerates the values for reason code.
type ReasonCode string
const (
// ReasonCodeNotAvailableForRegion ...
ReasonCodeNotAvailableForRegion ReasonCode = "NotAvailableForRegion"
// ReasonCodeNotAvailableForSubscription ...
ReasonCodeNotAvailableForSubscription ReasonCode = "NotAvailableForSubscription"
// ReasonCodeNotSpecified ...
ReasonCodeNotSpecified ReasonCode = "NotSpecified"
)
// PossibleReasonCodeValues returns an array of possible values for the ReasonCode const type.
func PossibleReasonCodeValues() []ReasonCode {
return []ReasonCode{ReasonCodeNotAvailableForRegion, ReasonCodeNotAvailableForSubscription, ReasonCodeNotSpecified}
}
// RemoteLoginPortPublicAccess enumerates the values for remote login port public access.
type RemoteLoginPortPublicAccess string
const (
// RemoteLoginPortPublicAccessDisabled ...
RemoteLoginPortPublicAccessDisabled RemoteLoginPortPublicAccess = "Disabled"
// RemoteLoginPortPublicAccessEnabled ...
RemoteLoginPortPublicAccessEnabled RemoteLoginPortPublicAccess = "Enabled"
// RemoteLoginPortPublicAccessNotSpecified ...
RemoteLoginPortPublicAccessNotSpecified RemoteLoginPortPublicAccess = "NotSpecified"
)
// PossibleRemoteLoginPortPublicAccessValues returns an array of possible values for the RemoteLoginPortPublicAccess const type.
func PossibleRemoteLoginPortPublicAccessValues() []RemoteLoginPortPublicAccess {
return []RemoteLoginPortPublicAccess{RemoteLoginPortPublicAccessDisabled, RemoteLoginPortPublicAccessEnabled, RemoteLoginPortPublicAccessNotSpecified}
}
// ResourceIdentityType enumerates the values for resource identity type.
type ResourceIdentityType string
const (
// ResourceIdentityTypeNone ...
ResourceIdentityTypeNone ResourceIdentityType = "None"
// ResourceIdentityTypeSystemAssigned ...
ResourceIdentityTypeSystemAssigned ResourceIdentityType = "SystemAssigned"
// ResourceIdentityTypeSystemAssignedUserAssigned ...
ResourceIdentityTypeSystemAssignedUserAssigned ResourceIdentityType = "SystemAssigned,UserAssigned"
// ResourceIdentityTypeUserAssigned ...
ResourceIdentityTypeUserAssigned ResourceIdentityType = "UserAssigned"
)
// PossibleResourceIdentityTypeValues returns an array of possible values for the ResourceIdentityType const type.
func PossibleResourceIdentityTypeValues() []ResourceIdentityType {
return []ResourceIdentityType{ResourceIdentityTypeNone, ResourceIdentityTypeSystemAssigned, ResourceIdentityTypeSystemAssignedUserAssigned, ResourceIdentityTypeUserAssigned}
}
// SSHPublicAccess enumerates the values for ssh public access.
type SSHPublicAccess string
const (
// SSHPublicAccessDisabled ...
SSHPublicAccessDisabled SSHPublicAccess = "Disabled"
// SSHPublicAccessEnabled ...
SSHPublicAccessEnabled SSHPublicAccess = "Enabled"
)
// PossibleSSHPublicAccessValues returns an array of possible values for the SSHPublicAccess const type.
func PossibleSSHPublicAccessValues() []SSHPublicAccess {
return []SSHPublicAccess{SSHPublicAccessDisabled, SSHPublicAccessEnabled}
}
// Status enumerates the values for status.
type Status string
const (
// StatusFailure ...
StatusFailure Status = "Failure"
// StatusInvalidQuotaBelowClusterMinimum ...
StatusInvalidQuotaBelowClusterMinimum Status = "InvalidQuotaBelowClusterMinimum"
// StatusInvalidQuotaExceedsSubscriptionLimit ...
StatusInvalidQuotaExceedsSubscriptionLimit Status = "InvalidQuotaExceedsSubscriptionLimit"
// StatusInvalidVMFamilyName ...
StatusInvalidVMFamilyName Status = "InvalidVMFamilyName"
// StatusOperationNotEnabledForRegion ...
StatusOperationNotEnabledForRegion Status = "OperationNotEnabledForRegion"
// StatusOperationNotSupportedForSku ...
StatusOperationNotSupportedForSku Status = "OperationNotSupportedForSku"
// StatusSuccess ...
StatusSuccess Status = "Success"
// StatusUndefined ...
StatusUndefined Status = "Undefined"
)
// PossibleStatusValues returns an array of possible values for the Status const type.
func PossibleStatusValues() []Status {
return []Status{StatusFailure, StatusInvalidQuotaBelowClusterMinimum, StatusInvalidQuotaExceedsSubscriptionLimit, StatusInvalidVMFamilyName, StatusOperationNotEnabledForRegion, StatusOperationNotSupportedForSku, StatusSuccess, StatusUndefined}
}
// Status1 enumerates the values for status 1.
type Status1 string
const (
// Status1Auto ...
Status1Auto Status1 = "Auto"
// Status1Disabled ...
Status1Disabled Status1 = "Disabled"
// Status1Enabled ...
Status1Enabled Status1 = "Enabled"
)
// PossibleStatus1Values returns an array of possible values for the Status1 const type.
func PossibleStatus1Values() []Status1 {
return []Status1{Status1Auto, Status1Disabled, Status1Enabled}
}
// UnderlyingResourceAction enumerates the values for underlying resource action.
type UnderlyingResourceAction string
const (
// UnderlyingResourceActionDelete ...
UnderlyingResourceActionDelete UnderlyingResourceAction = "Delete"
// UnderlyingResourceActionDetach ...
UnderlyingResourceActionDetach UnderlyingResourceAction = "Detach"
)
// PossibleUnderlyingResourceActionValues returns an array of possible values for the UnderlyingResourceAction const type.
func PossibleUnderlyingResourceActionValues() []UnderlyingResourceAction {
return []UnderlyingResourceAction{UnderlyingResourceActionDelete, UnderlyingResourceActionDetach}
}
// UsageUnit enumerates the values for usage unit.
type UsageUnit string
const (
// UsageUnitCount ...
UsageUnitCount UsageUnit = "Count"
)
// PossibleUsageUnitValues returns an array of possible values for the UsageUnit const type.
func PossibleUsageUnitValues() []UsageUnit {
return []UsageUnit{UsageUnitCount}
}
// ValueFormat enumerates the values for value format.
type ValueFormat string
const (
// ValueFormatJSON ...
ValueFormatJSON ValueFormat = "JSON"
)
// PossibleValueFormatValues returns an array of possible values for the ValueFormat const type.
func PossibleValueFormatValues() []ValueFormat {
return []ValueFormat{ValueFormatJSON}
}
// VariantType enumerates the values for variant type.
type VariantType string
const (
// VariantTypeControl ...
VariantTypeControl VariantType = "Control"
// VariantTypeTreatment ...
VariantTypeTreatment VariantType = "Treatment"
)
// PossibleVariantTypeValues returns an array of possible values for the VariantType const type.
func PossibleVariantTypeValues() []VariantType {
return []VariantType{VariantTypeControl, VariantTypeTreatment}
}
// VMPriceOSType enumerates the values for vm price os type.
type VMPriceOSType string
const (
// VMPriceOSTypeLinux ...
VMPriceOSTypeLinux VMPriceOSType = "Linux"
// VMPriceOSTypeWindows ...
VMPriceOSTypeWindows VMPriceOSType = "Windows"
)
// PossibleVMPriceOSTypeValues returns an array of possible values for the VMPriceOSType const type.
func PossibleVMPriceOSTypeValues() []VMPriceOSType {
return []VMPriceOSType{VMPriceOSTypeLinux, VMPriceOSTypeWindows}
}
// VMPriority enumerates the values for vm priority.
type VMPriority string
const (
// VMPriorityDedicated ...
VMPriorityDedicated VMPriority = "Dedicated"
// VMPriorityLowPriority ...
VMPriorityLowPriority VMPriority = "LowPriority"
)
// PossibleVMPriorityValues returns an array of possible values for the VMPriority const type.
func PossibleVMPriorityValues() []VMPriority {
return []VMPriority{VMPriorityDedicated, VMPriorityLowPriority}
}
// VMTier enumerates the values for vm tier.
type VMTier string
const (
// VMTierLowPriority ...
VMTierLowPriority VMTier = "LowPriority"
// VMTierSpot ...
VMTierSpot VMTier = "Spot"
// VMTierStandard ...
VMTierStandard VMTier = "Standard"
)
// PossibleVMTierValues returns an array of possible values for the VMTier const type.
func PossibleVMTierValues() []VMTier {
return []VMTier{VMTierLowPriority, VMTierSpot, VMTierStandard}
}
// WebServiceState enumerates the values for web service state.
type WebServiceState string
const (
// WebServiceStateFailed ...
WebServiceStateFailed WebServiceState = "Failed"
// WebServiceStateHealthy ...
WebServiceStateHealthy WebServiceState = "Healthy"
// WebServiceStateTransitioning ...
WebServiceStateTransitioning WebServiceState = "Transitioning"
// WebServiceStateUnhealthy ...
WebServiceStateUnhealthy WebServiceState = "Unhealthy"
// WebServiceStateUnschedulable ...
WebServiceStateUnschedulable WebServiceState = "Unschedulable"
)
// PossibleWebServiceStateValues returns an array of possible values for the WebServiceState const type.
func PossibleWebServiceStateValues() []WebServiceState {
return []WebServiceState{WebServiceStateFailed, WebServiceStateHealthy, WebServiceStateTransitioning, WebServiceStateUnhealthy, WebServiceStateUnschedulable}
}<|fim▁end|> | OrderStringUpdatedAtDesc OrderString = "UpdatedAtDesc"
)
|
<|file_name|>version.go<|end_file_name|><|fim▁begin|>package command
import (
"bytes"
"fmt"
"github.com/hashicorp/serf/serf"
"github.com/mitchellh/cli"
)
// VersionCommand is a Command implementation that prints the version.
type VersionCommand struct {
Revision string
Version string
VersionPrerelease string
Ui cli.Ui<|fim▁hole|>func (c *VersionCommand) Help() string {
return ""
}
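// Run prints the Serf version (including any prerelease tag and revision) plus
// the supported agent protocol range, and always returns exit code 0.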
func (c *VersionCommand) Run(_ []string) int {
var versionString bytes.Buffer
fmt.Fprintf(&versionString, "Serf v%s", c.Version)
if c.VersionPrerelease != "" {
fmt.Fprintf(&versionString, ".%s", c.VersionPrerelease)
if c.Revision != "" {
fmt.Fprintf(&versionString, " (%s)", c.Revision)
}
}
c.Ui.Output(versionString.String())
c.Ui.Output(fmt.Sprintf("Agent Protocol: %d (Understands back to: %d)",
serf.ProtocolVersionMax, serf.ProtocolVersionMin))
return 0
}
func (c *VersionCommand) Synopsis() string {
return "Prints the Serf version"
}<|fim▁end|> | }
|
<|file_name|>advanced_excel.py<|end_file_name|><|fim▁begin|>"""
See http://pbpython.com/advanced-excel-workbooks.html for details on this script
"""
from __future__ import print_function
import pandas as pd
from xlsxwriter.utility import xl_rowcol_to_cell
def format_excel(writer, df_size):
""" Add Excel specific formatting to the workbook
df_size is a tuple representing the size of the dataframe, typically obtained
from df.shape -> (20,3)
"""
# Get the workbook and the summary sheet so we can add the formatting
workbook = writer.book
worksheet = writer.sheets['summary']
# Add currency formatting and apply it
money_fmt = workbook.add_format({'num_format': 42, 'align': 'center'})
worksheet.set_column('A:A', 20)
worksheet.set_column('B:C', 15, money_fmt)
# Add 1 to row so we can include a total<|fim▁hole|> # This assumes we start in the left hand corner
table_range = 'A1:{}'.format(table_end)
worksheet.add_table(table_range, {'columns': [{'header': 'account',
'total_string': 'Total'},
{'header': 'Total Sales',
'total_function': 'sum'},
{'header': 'Average Sales',
'total_function': 'average'}],
'autofilter': False,
'total_row': True,
'style': 'Table Style Medium 20'})
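# Script entry point: summarise sales per account (sum and mean of 'ext price')
# and write the result to Excel with the table formatting applied above.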
if __name__ == "__main__":
sales_df = pd.read_excel('https://github.com/chris1610/pbpython/blob/master/data/sample-salesv3.xlsx?raw=true')
sales_summary = sales_df.groupby(['name'])['ext price'].agg(['sum', 'mean'])
# Reset the index for consistency when saving in Excel
sales_summary.reset_index(inplace=True)
writer = pd.ExcelWriter('sales_summary.xlsx', engine='xlsxwriter')
sales_summary.to_excel(writer, 'summary', index=False)
format_excel(writer, sales_summary.shape)
writer.save()<|fim▁end|> | # subtract 1 from the column to handle because we don't care about index
table_end = xl_rowcol_to_cell(df_size[0] + 1, df_size[1] - 1) |
<|file_name|>ccroot.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python
# encoding: utf-8
import os,sys,re
import TaskGen,Task,Utils,preproc,Logs,Build,Options
from Logs import error,debug,warn
from Utils import md5
from TaskGen import taskgen,after,before,feature
from Constants import*
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import config_c
USE_TOP_LEVEL=False
win_platform=sys.platform in('win32','cygwin')
def get_cc_version(conf,cc,gcc=False,icc=False):
cmd=cc+['-dM','-E','-']
try:
p=Utils.pproc.Popen(cmd,stdin=Utils.pproc.PIPE,stdout=Utils.pproc.PIPE,stderr=Utils.pproc.PIPE)
p.stdin.write('\n')
out=p.communicate()[0]
except:
conf.fatal('could not determine the compiler version %r'%cmd)
out=str(out)
if gcc:
if out.find('__INTEL_COMPILER')>=0:
conf.fatal('The intel compiler pretends to be gcc')
if out.find('__GNUC__')<0:
conf.fatal('Could not determine the compiler type')
if icc and out.find('__INTEL_COMPILER')<0:
conf.fatal('Not icc/icpc')
k={}
if icc or gcc:
out=out.split('\n')
import shlex
for line in out:
lst=shlex.split(line)
if len(lst)>2:
key=lst[1]
val=lst[2]
k[key]=val
conf.env['CC_VERSION']=(k['__GNUC__'],k['__GNUC_MINOR__'],k['__GNUC_PATCHLEVEL__'])
return k
class DEBUG_LEVELS:
ULTRADEBUG="ultradebug"
DEBUG="debug"
RELEASE="release"
OPTIMIZED="optimized"
CUSTOM="custom"
ALL=[ULTRADEBUG,DEBUG,RELEASE,OPTIMIZED,CUSTOM]
def scan(self):
debug('ccroot: _scan_preprocessor(self, node, env, path_lst)')
if len(self.inputs)==1:
node=self.inputs[0]
(nodes,names)=preproc.get_deps(node,self.env,nodepaths=self.env['INC_PATHS'])
if Logs.verbose:
debug('deps: deps for %s: %r; unresolved %r'%(str(node),nodes,names))
return(nodes,names)
all_nodes=[]
all_names=[]
seen=[]
for node in self.inputs:
(nodes,names)=preproc.get_deps(node,self.env,nodepaths=self.env['INC_PATHS'])
if Logs.verbose:
debug('deps: deps for %s: %r; unresolved %r'%(str(node),nodes,names))
for x in nodes:
if id(x)in seen:continue
seen.append(id(x))
all_nodes.append(x)
for x in names:
if not x in all_names:
all_names.append(x)
return(all_nodes,all_names)
class ccroot_abstract(TaskGen.task_gen):
def __init__(self,*k,**kw):
if len(k)>1:
k=list(k)
if k[1][0]!='c':
k[1]='c'+k[1]
TaskGen.task_gen.__init__(self,*k,**kw)
def get_target_name(self):
tp='program'
for x in self.features:
if x in['cshlib','cstaticlib']:
tp=x.lstrip('c')
pattern=self.env[tp+'_PATTERN']
if not pattern:pattern='%s'
dir,name=os.path.split(self.target)
if win_platform and getattr(self,'vnum','')and'cshlib'in self.features:
name=name+'-'+self.vnum.split('.')[0]
return os.path.join(dir,pattern%name)
def install_implib(self):
bld=self.outputs[0].__class__.bld
bindir=self.install_path
if not len(self.outputs)==2:
raise ValueError('fail')
dll=self.outputs[0]
bld.install_as(bindir+os.sep+dll.name,dll.abspath(self.env),chmod=self.chmod,env=self.env)
implib=self.outputs[1]
libdir='${LIBDIR}'
if not self.env['LIBDIR']:
libdir='${PREFIX}/lib'
if sys.platform=='cygwin':
bld.symlink_as(libdir+'/'+implib.name,bindir+os.sep+dll.name,env=self.env)
else:
bld.install_as(libdir+'/'+implib.name,implib.abspath(self.env),env=self.env)
def install_shlib(self):
bld=self.outputs[0].__class__.bld
nums=self.vnum.split('.')
path=self.install_path
if not path:return
libname=self.outputs[0].name
name3=libname+'.'+self.vnum
name2=libname+'.'+nums[0]
name1=libname
filename=self.outputs[0].abspath(self.env)
bld.install_as(os.path.join(path,name3),filename,env=self.env)
bld.symlink_as(os.path.join(path,name2),name3)
bld.symlink_as(os.path.join(path,name1),name3)
def default_cc(self):
Utils.def_attrs(self,includes='',defines='',rpaths='',uselib='',uselib_local='',add_objects='',p_flag_vars=[],p_type_vars=[],compiled_tasks=[],link_task=None)
def apply_verif(self):
if not(self.source or getattr(self,'add_objects',None)):
raise Utils.WafError('no source files specified for %s'%self)
if not self.target:
raise Utils.WafError('no target for %s'%self)
def vars_target_cprogram(self):
self.default_install_path=self.env['BINDIR']or'${PREFIX}/bin'
self.default_chmod=O755
def vars_target_cstaticlib(self):
self.default_install_path=self.env['LIBDIR']or'${PREFIX}/lib${LIB_EXT}'
def vars_target_cshlib(self):
if win_platform:
self.default_install_path=self.env['BINDIR']or'${PREFIX}/bin'
self.default_chmod=O755
else:
self.default_install_path=self.env['LIBDIR']or'${PREFIX}/lib${LIB_EXT}'
def install_target_cstaticlib(self):
if not self.bld.is_install:return
self.link_task.install_path=self.install_path<|fim▁hole|>def apply_incpaths(self):
lst=[]
for lib in self.to_list(self.uselib):
for path in self.env['CPPPATH_'+lib]:
if not path in lst:
lst.append(path)
if preproc.go_absolute:
for path in preproc.standard_includes:
if not path in lst:
lst.append(path)
for path in self.to_list(self.includes):
if not path in lst:
if preproc.go_absolute or not os.path.isabs(path):
lst.append(path)
else:
self.env.prepend_value('CPPPATH',path)
for path in lst:
node=None
if os.path.isabs(path):
if preproc.go_absolute:
node=self.bld.root.find_dir(path)
elif path[0]=='#':
node=self.bld.srcnode
if len(path)>1:
node=node.find_dir(path[1:])
else:
node=self.path.find_dir(path)
if node:
self.env.append_value('INC_PATHS',node)
if USE_TOP_LEVEL:
self.env.append_value('INC_PATHS',self.bld.srcnode)
def apply_type_vars(self):
for x in self.features:
if not x in['cprogram','cstaticlib','cshlib']:
continue
x=x.lstrip('c')
st=self.env[x+'_USELIB']
if st:self.uselib=self.uselib+' '+st
for var in self.p_type_vars:
compvar='%s_%s'%(x,var)
value=self.env[compvar]
if value:self.env.append_value(var,value)
def apply_link(self):
link=getattr(self,'link',None)
if not link:
if'cstaticlib'in self.features:link='ar_link_static'
elif'cxx'in self.features:link='cxx_link'
else:link='cc_link'
if'cshlib'in self.features:
if win_platform:
link='dll_'+link
elif getattr(self,'vnum',''):
if sys.platform=='darwin':
self.vnum=''
else:
link='vnum_'+link
tsk=self.create_task(link)
outputs=[t.outputs[0]for t in self.compiled_tasks]
tsk.set_inputs(outputs)
tsk.set_outputs(self.path.find_or_declare(get_target_name(self)))
tsk.chmod=self.chmod
self.link_task=tsk
def apply_lib_vars(self):
env=self.env
uselib=self.to_list(self.uselib)
seen=[]
names=self.to_list(self.uselib_local)[:]
while names:
x=names.pop(0)
if x in seen:
continue
y=self.name_to_obj(x)
if not y:
raise Utils.WafError("object '%s' was not found in uselib_local (required by '%s')"%(x,self.name))
if getattr(y,'uselib_local',None):
lst=y.to_list(y.uselib_local)
for u in lst:
if not u in seen:
names.append(u)
y.post()
seen.append(x)
libname=y.target[y.target.rfind(os.sep)+1:]
if'cshlib'in y.features or'cprogram'in y.features:
env.append_value('LIB',libname)
elif'cstaticlib'in y.features:
env.append_value('STATICLIB',libname)
if y.link_task is not None:
self.link_task.set_run_after(y.link_task)
dep_nodes=getattr(self.link_task,'dep_nodes',[])
self.link_task.dep_nodes=dep_nodes+y.link_task.outputs
tmp_path=y.link_task.outputs[0].parent.bldpath(self.env)
if not tmp_path in env['LIBPATH']:env.prepend_value('LIBPATH',tmp_path)
morelibs=y.to_list(y.uselib)
for v in morelibs:
if v in uselib:continue
uselib=[v]+uselib
if getattr(y,'export_incdirs',None):
cpppath_st=self.env['CPPPATH_ST']
for x in self.to_list(y.export_incdirs):
node=y.path.find_dir(x)
if not node:
raise Utils.WafError('object %s: invalid folder %s in export_incdirs'%(y.target,x))
self.env.append_unique('INC_PATHS',node)
for x in uselib:
for v in self.p_flag_vars:
val=self.env[v+'_'+x]
if val:self.env.append_value(v,val)
def apply_objdeps(self):
if not getattr(self,'add_objects',None):return
seen=[]
names=self.to_list(self.add_objects)
while names:
x=names[0]
if x in seen:
names=names[1:]
continue
y=self.name_to_obj(x)
if not y:
raise Utils.WafError("object '%s' was not found in uselib_local (required by add_objects '%s')"%(x,self.name))
if getattr(y,'add_objects',None):
added=0
lst=y.to_list(y.add_objects)
lst.reverse()
for u in lst:
if u in seen:continue
added=1
names=[u]+names
if added:continue
y.post()
seen.append(x)
for t in y.compiled_tasks:
self.link_task.inputs.extend(t.outputs)
def apply_obj_vars(self):
v=self.env
lib_st=v['LIB_ST']
staticlib_st=v['STATICLIB_ST']
libpath_st=v['LIBPATH_ST']
staticlibpath_st=v['STATICLIBPATH_ST']
rpath_st=v['RPATH_ST']
app=v.append_unique
if v['FULLSTATIC']:
v.append_value('LINKFLAGS',v['FULLSTATIC_MARKER'])
for i in v['RPATH']:
if i and rpath_st:
app('LINKFLAGS',rpath_st%i)
for i in v['LIBPATH']:
app('LINKFLAGS',libpath_st%i)
app('LINKFLAGS',staticlibpath_st%i)
if v['STATICLIB']:
v.append_value('LINKFLAGS',v['STATICLIB_MARKER'])
k=[(staticlib_st%i)for i in v['STATICLIB']]
app('LINKFLAGS',k)
if not v['FULLSTATIC']:
if v['STATICLIB']or v['LIB']:
v.append_value('LINKFLAGS',v['SHLIB_MARKER'])
app('LINKFLAGS',[lib_st%i for i in v['LIB']])
def apply_vnum(self):
if sys.platform not in('win32','cygwin','darwin'):
try:
nums=self.vnum.split('.')
except AttributeError:
pass
else:
try:name3=self.soname
except AttributeError:name3=self.link_task.outputs[0].name+'.'+nums[0]
self.link_task.outputs.append(self.link_task.outputs[0].parent.find_or_declare(name3))
self.env.append_value('LINKFLAGS',(self.env['SONAME_ST']%name3).split())
def apply_implib(self):
if win_platform:
dll=self.link_task.outputs[0]
implib=dll.parent.find_or_declare(self.env['implib_PATTERN']%os.path.split(self.target)[1])
self.link_task.outputs.append(implib)
if sys.platform=='cygwin':
pass
elif sys.platform=='win32':
self.env.append_value('LINKFLAGS',(self.env['IMPLIB_ST']%implib.bldpath(self.env)).split())
self.link_task.install=install_implib
def process_obj_files(self):
if not hasattr(self,'obj_files'):return
for x in self.obj_files:
node=self.path.find_resource(x)
self.link_task.inputs.append(node)
def add_obj_file(self,file):
if not hasattr(self,'obj_files'):self.obj_files=[]
if not'process_obj_files'in self.meths:self.meths.append('process_obj_files')
self.obj_files.append(file)
c_attrs={'cxxflag':'CXXFLAGS','cflag':'CCFLAGS','ccflag':'CCFLAGS','linkflag':'LINKFLAGS','ldflag':'LINKFLAGS','lib':'LIB','libpath':'LIBPATH','staticlib':'STATICLIB','staticlibpath':'STATICLIBPATH','rpath':'RPATH','framework':'FRAMEWORK','frameworkpath':'FRAMEWORKPATH'}
def add_extra_flags(self):
for x in self.__dict__.keys():
y=x.lower()
if y[-1]=='s':
y=y[:-1]
if c_attrs.get(y,None):
self.env.append_unique(c_attrs[y],getattr(self,x))
def link_vnum(self):
clsname=self.__class__.__name__.replace('vnum_','')
out=self.outputs
self.outputs=out[1:]
ret=Task.TaskBase.classes[clsname].__dict__['run'](self)
self.outputs=out
if ret:
return ret
try:
os.remove(self.outputs[0].abspath(self.env))
except OSError:
pass
try:
os.symlink(self.outputs[1].name,self.outputs[0].bldpath(self.env))
except:
return 1
def post_dll_link(self):
if sys.platform=='cygwin':
try:
os.remove(self.outputs[1].abspath(self.env))
except OSError:
pass
try:
os.symlink(self.outputs[0].name,self.outputs[1].bldpath(self.env))
except:
return 1
feature('cc','cxx')(default_cc)
before('apply_core')(default_cc)
feature('cprogram','dprogram','cstaticlib','dstaticlib','cshlib','dshlib')(apply_verif)
feature('cprogram','dprogram')(vars_target_cprogram)
before('apply_core')(vars_target_cprogram)
feature('cstaticlib','dstaticlib')(vars_target_cstaticlib)
before('apply_core')(vars_target_cstaticlib)
feature('cshlib','dshlib')(vars_target_cshlib)
before('apply_core')(vars_target_cshlib)
feature('cprogram','dprogram','cstaticlib','dstaticlib','cshlib','dshlib')(install_target_cstaticlib)
after('apply_objdeps','apply_link')(install_target_cstaticlib)
feature('cshlib','dshlib')(install_target_cshlib)
after('apply_link')(install_target_cshlib)
feature('cc','cxx')(apply_incpaths)
after('apply_type_vars','apply_lib_vars','apply_core')(apply_incpaths)
feature('cc','cxx')(apply_type_vars)
after('init_cc','init_cxx')(apply_type_vars)
before('apply_lib_vars')(apply_type_vars)
feature('cprogram','cshlib','cstaticlib')(apply_link)
after('apply_core')(apply_link)
feature('cc','cxx')(apply_lib_vars)
after('apply_link','init_cc','init_cxx')(apply_lib_vars)
feature('cprogram','cstaticlib','cshlib')(apply_objdeps)
after('apply_obj_vars','apply_vnum','apply_implib','apply_link')(apply_objdeps)
feature('cprogram','cshlib','cstaticlib')(apply_obj_vars)
after('apply_lib_vars')(apply_obj_vars)
feature('cshlib')(apply_vnum)
after('apply_link')(apply_vnum)
before('apply_lib_vars')(apply_vnum)
feature('implib')(apply_implib)
after('apply_link')(apply_implib)
before('apply_lib_vars')(apply_implib)
after('apply_link')(process_obj_files)
taskgen(add_obj_file)
feature('cc','cxx')(add_extra_flags)
before('init_cxx','init_cc')(add_extra_flags)
before('apply_lib_vars','apply_obj_vars','apply_incpaths','init_cc')(add_extra_flags)<|fim▁end|> | def install_target_cshlib(self):
if getattr(self,'vnum','')and not win_platform:
self.link_task.vnum=self.vnum
self.link_task.install=install_shlib |
<|file_name|>XmlTaskMapper.java<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2017 Antony Esik
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ae.camunda.dispatcher.mapper.xml;
import com.ae.camunda.dispatcher.api.mapper.TaskMapper;
import com.ae.camunda.dispatcher.exception.CamundaMappingException;
import org.eclipse.persistence.jaxb.JAXBContextFactory;
import org.springframework.stereotype.Component;
import javax.xml.bind.JAXBContext;<|fim▁hole|>import java.util.Collections;
/**
* @author AEsik
* Date 09.10.2017
*/
@Component
public class XmlTaskMapper implements TaskMapper {
@Override
public String map(Object task) {
try {
JAXBContext context = JAXBContextFactory.createContext(new Class[]{task.getClass()}, Collections.emptyMap());
StringWriter sw = new StringWriter();
context.createMarshaller().marshal(task, sw);
return sw.toString();
} catch (JAXBException e) {
throw new CamundaMappingException(e);
}
}
@Override
public Object map(String body, Class<?> clazz) {
try {
JAXBContext context = JAXBContextFactory.createContext(new Class[]{clazz}, Collections.emptyMap());
StringReader sr = new StringReader(body);
return context.createUnmarshaller().unmarshal(sr);
} catch (JAXBException e) {
throw new CamundaMappingException(e);
}
}
}<|fim▁end|> | import javax.xml.bind.JAXBException;
import java.io.StringReader;
import java.io.StringWriter; |
<|file_name|>CCLoaders.js<|end_file_name|><|fim▁begin|>/****************************************************************************
Copyright (c) 2011-2012 cocos2d-x.org
Copyright (c) 2013-2014 Chukong Technologies Inc.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************/
cc._txtLoader = {
load : function(realUrl, url, res, cb){
cc.loader.loadTxt(realUrl, cb);
}
};
cc.loader.register(["txt", "xml", "vsh", "fsh", "atlas"], cc._txtLoader);
cc._jsonLoader = {
load : function(realUrl, url, res, cb){
cc.loader.loadJson(realUrl, cb);
}
};
cc.loader.register(["json", "ExportJson"], cc._jsonLoader);
cc._jsLoader = {
load : function(realUrl, url, res, cb){
cc.loader.loadJs(realUrl, cb);
}
};
cc.loader.register(["js"], cc._jsLoader);
cc._imgLoader = {
load : function(realUrl, url, res, cb){
cc.loader.cache[url] = cc.loader.loadImg(realUrl, function(err, img){
if(err)
return cb(err);
cc.textureCache.handleLoadedTexture(url);
cb(null, img);
});
}
};
cc.loader.register(["png", "jpg", "bmp","jpeg","gif", "ico", "tiff", "webp"], cc._imgLoader);
cc._serverImgLoader = {
load : function(realUrl, url, res, cb){
cc.loader.cache[url] = cc.loader.loadImg(res.src, function(err, img){
if(err)
return cb(err);
cc.textureCache.handleLoadedTexture(url);
cb(null, img);
});
}
};
cc.loader.register(["serverImg"], cc._serverImgLoader);
cc._plistLoader = {
load : function(realUrl, url, res, cb){
cc.loader.loadTxt(realUrl, function(err, txt){
if(err)
return cb(err);
cb(null, cc.plistParser.parse(txt));
});
}
};
cc.loader.register(["plist"], cc._plistLoader);
cc._fontLoader = {
TYPE : {
".eot" : "embedded-opentype",
".ttf" : "truetype",
".ttc" : "truetype",
".woff" : "woff",
".svg" : "svg"
},
_loadFont : function(name, srcs, type){
var doc = document, path = cc.path, TYPE = this.TYPE, fontStyle = document.createElement("style");
fontStyle.type = "text/css";
doc.body.appendChild(fontStyle);
var fontStr = "";
if(isNaN(name - 0))
fontStr += "@font-face { font-family:" + name + "; src:";
else
fontStr += "@font-face { font-family:'" + name + "'; src:";
if(srcs instanceof Array){
for(var i = 0, li = srcs.length; i < li; i++){
var src = srcs[i];
type = path.extname(src).toLowerCase();
fontStr += "url('" + srcs[i] + "') format('" + TYPE[type] + "')";
fontStr += (i === li - 1) ? ";" : ",";
}
}else{
type = type.toLowerCase();
fontStr += "url('" + srcs + "') format('" + TYPE[type] + "');";
}
fontStyle.textContent += fontStr + "}";
//<div style="font-family: PressStart;">.</div>
var preloadDiv = document.createElement("div");
var _divStyle = preloadDiv.style;
_divStyle.fontFamily = name;<|fim▁hole|> preloadDiv.innerHTML = ".";
_divStyle.position = "absolute";
_divStyle.left = "-100px";
_divStyle.top = "-100px";
doc.body.appendChild(preloadDiv);
},
load : function(realUrl, url, res, cb){
var self = this;
var type = res.type, name = res.name, srcs = res.srcs;
if(cc.isString(res)){
type = cc.path.extname(res);
name = cc.path.basename(res, type);
self._loadFont(name, res, type);
}else{
self._loadFont(name, srcs);
}
if(document.fonts){
document.fonts.load("1em " + name).then(function(){
cb(null, true);
}, function(err){
cb(err);
});
}else{
cb(null, true);
}
}
};
cc.loader.register(["font", "eot", "ttf", "woff", "svg", "ttc"], cc._fontLoader);
cc._binaryLoader = {
load : function(realUrl, url, res, cb){
cc.loader.loadBinary(realUrl, cb);
}
};
cc._csbLoader = {
load: function(realUrl, url, res, cb){
cc.loader.loadCsb(realUrl, cb);
}
};
cc.loader.register(["csb"], cc._csbLoader);<|fim▁end|> | |
<|file_name|>classes_1.js<|end_file_name|><|fim▁begin|>var searchData=
[
['changedetailsapi',['ChangeDetailsAPI',['../classbackend_1_1api_1_1users_1_1_change_details_a_p_i.html',1,'backend::api::users']]],
['chatmessageapi',['ChatMessageApi',['../classbackend_1_1api_1_1messages_1_1_chat_message_api.html',1,'backend::api::messages']]],<|fim▁hole|><|fim▁end|> | ['chatuserapi',['ChatUserApi',['../classbackend_1_1api_1_1messages_1_1_chat_user_api.html',1,'backend::api::messages']]],
['chatuserretrieveapi',['ChatUserRetrieveApi',['../classbackend_1_1api_1_1messages_1_1_chat_user_retrieve_api.html',1,'backend::api::messages']]]
]; |
<|file_name|>user.py<|end_file_name|><|fim▁begin|>from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from selvbetjening.sadmin2 import menu
from selvbetjening.sadmin2.decorators import sadmin_prerequisites
from selvbetjening.sadmin2.forms import UserForm, PasswordForm
from selvbetjening.sadmin2.views.generic import generic_create_view
@sadmin_prerequisites
def user_change(request, user_pk):
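    # Render and process the "edit user details" form; on success the same page is
    # redisplayed with a confirmation message.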
user = get_object_or_404(get_user_model(), pk=user_pk)
context = {
'sadmin2_menu_main_active': 'userportal',
'sadmin2_breadcrumbs_active': 'user',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_user,
'sadmin2_menu_tab_active': 'user',
'user': user
}
return generic_create_view(request,
UserForm,
reverse('sadmin2:user', kwargs={'user_pk': user.pk}),
message_success=_('User updated'),
context=context,
instance=user)
@sadmin_prerequisites
def user_password(request, user_pk):<|fim▁hole|> context = {
'sadmin2_menu_main_active': 'userportal',
'sadmin2_breadcrumbs_active': 'user_password',
'sadmin2_menu_tab': menu.sadmin2_menu_tab_user,
'sadmin2_menu_tab_active': 'password',
'user': user
}
return generic_create_view(request,
PasswordForm,
redirect_success_url=reverse('sadmin2:user_password', kwargs={'user_pk': user.pk}),
message_success=_('Password updated'),
context=context,
instance=user)<|fim▁end|> |
user = get_object_or_404(get_user_model(), pk=user_pk)
|
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>mod shell;
mod syscall;
use self::syscall::*;
#[no_mangle]
pub extern "C" fn el0_other() -> ! {
println!("I just exit.");
//loop {}
syscall_exit(127);
}
#[no_mangle]
pub extern "C" fn el0_shell() -> ! {
println!("I sleep 50ms.");
syscall_sleep(50).unwrap();
println!("And then I just shell.");
loop {
let code = shell::shell("usr> ");
println!("exit with code: {}", code);
}
}
#[no_mangle]
pub extern "C" fn el0_init() -> ! {
println!("im in a bear suite");
unsafe { asm!("brk 1" :::: "volatile"); }
println!("fuck you shit: {}", 555);
unsafe { asm!("brk 2" :::: "volatile"); }
//loop {}
println!("test GPIO");
use pi::gpio::{Gpio, GPIO_BASE};
use pi::common::IO_BASE_RAW;
let mut r = unsafe { Gpio::new_from(IO_BASE_RAW + GPIO_BASE, 21).into_output() };
let mut g = unsafe { Gpio::new_from(IO_BASE_RAW + GPIO_BASE, 20).into_output() };<|fim▁hole|>
let mut motor = unsafe { Gpio::new_from(IO_BASE_RAW + GPIO_BASE, 26).into_output() };
let mut btn = unsafe { Gpio::new_from(IO_BASE_RAW + GPIO_BASE, 16).into_input() };
let mut led = 0;
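    // Cycle the RGB LED through its 8 on/off combinations: each button press pulses
    // the motor for ~5 ms and advances the pattern, with a 100 ms debounce sleep.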
loop {
if led & 1 != 0 { r.set() } else { r.clear() }
if led & 2 != 0 { g.set() } else { g.clear() }
if led & 4 != 0 { b.set() } else { b.clear() }
if !btn.level() {
led += 1;
motor.set();
syscall_sleep(5).unwrap();
motor.clear();
syscall_sleep(100).unwrap();
}
}
}<|fim▁end|> | let mut b = unsafe { Gpio::new_from(IO_BASE_RAW + GPIO_BASE, 19).into_output() }; |
<|file_name|>legendImport.js<|end_file_name|><|fim▁begin|>import {FileSelect} from 'widget/fileSelect'
import {Form, form} from 'widget/form/form'
import {FormCombo} from 'widget/form/combo'
import {Layout} from 'widget/layout'
import {Panel} from 'widget/panel/panel'
import {activatable} from 'widget/activation/activatable'
import {compose} from 'compose'
import {msg} from 'translate'
import {parseCsvFile$} from 'csv'
import {withRecipe} from '../body/process/recipeContext'
import Color from 'color'
import Icon from 'widget/icon'
import Label from 'widget/label'
import React from 'react'
import _ from 'lodash'
import guid from 'guid'
import styles from './legendImport.module.css'
const fields = {
rows: new Form.Field()
.notEmpty(),
name: new Form.Field()
.notBlank(),
valueColumn: new Form.Field()
.notBlank(),
labelColumn: new Form.Field()
.notBlank(),
colorColumnType: new Form.Field()
.notBlank(),
colorColumn: new Form.Field()
.skip((v, {colorColumnType}) => colorColumnType !== 'single')
.notBlank(),
redColumn: new Form.Field()
.skip((v, {colorColumnType}) => colorColumnType !== 'multiple')
.notBlank(),
greenColumn: new Form.Field()
.skip((v, {colorColumnType}) => colorColumnType !== 'multiple')
.notBlank(),
blueColumn: new Form.Field()
.skip((v, {colorColumnType}) => colorColumnType !== 'multiple')
.notBlank()
}
class _LegendImport extends React.Component {
state = {
columns: undefined,
rows: undefined,
validMappings: undefined
}
render() {
const {activatable: {deactivate}, form} = this.props
const invalid = form.isInvalid()
return (
<Panel type='modal' className={styles.panel}>
<Panel.Header
icon='file-import'
title={msg('map.legendBuilder.import.title')}
/>
<Panel.Content>
{this.renderContent()}
</Panel.Content>
<Panel.Buttons onEscape={deactivate} onEnter={() => invalid || this.save()}>
<Panel.Buttons.Main>
<Panel.Buttons.Cancel onClick={deactivate}/>
<Panel.Buttons.Apply
disabled={invalid}
onClick={() => this.save()}
/>
</Panel.Buttons.Main>
</Panel.Buttons>
</Panel>
)
}
renderContent() {
const {validMappings} = this.state
return (
<Layout>
{this.renderFileSelect()}
{validMappings ? this.renderForm() : null}
</Layout>
)
}
renderForm() {
const {inputs: {colorColumnType}} = this.props
return (
<React.Fragment>
{this.renderColorColumnType()}
{colorColumnType.value === 'single'<|fim▁hole|> {this.renderMapping('redColumn')}
{this.renderMapping('greenColumn')}
{this.renderMapping('blueColumn')}
</Layout>
)}
<Layout type='horizontal'>
{this.renderMapping('valueColumn')}
{this.renderMapping('labelColumn')}
</Layout>
</React.Fragment>
)
}
renderColorColumnType() {
const {inputs: {colorColumnType}} = this.props
return (
<Form.Buttons
label={msg('map.legendBuilder.import.colorColumnType.label')}
tooltip={msg('map.legendBuilder.import.colorColumnType.tooltip')}
input={colorColumnType}
options={[
{value: 'single', label: msg('map.legendBuilder.import.colorColumnType.single')},
{value: 'multiple', label: msg('map.legendBuilder.import.colorColumnType.multiple')},
]}
/>
)
}
renderMapping(name) {
const {inputs} = this.props
const {validMappings} = this.state
const options = (validMappings[name] || []).map(column => ({value: column, label: column}))
return (
<FormCombo
className={styles.field}
input={inputs[name]}
options={options}
label={msg(['map.legendBuilder.import.column', name, 'label'])}
placeholder={msg(['map.legendBuilder.import.column', name, 'placeholder'])}
tooltip={msg(['map.legendBuilder.import.column', name, 'tooltip'])}
onChange={({value}) => this.selectedColumn(name, value)}
/>
)
}
renderFileSelect() {
const {stream, inputs: {name}} = this.props
return (
<Layout spacing={'compact'}>
<Label>{msg('map.legendBuilder.import.file.label')}</Label>
<FileSelect
single
onSelect={file => this.onSelectFile(file)}>
{name.value
? <div>
{stream('LOAD_CSV_ROWS').active
? <Icon name={'spinner'} className={styles.spinner}/>
: null}
{name.value}
</div>
: null
}
</FileSelect>
</Layout>
)
}
componentDidUpdate(prevProps, prevState) {
const {rows: prevRows} = prevState
const {rows} = this.state
if (rows !== prevRows) {
this.setDefaults()
}
}
selectedColumn(field, column) {
const {inputs} = this.props;
['valueColumn', 'labelColumn', 'colorColumn', 'redColumn', 'blueColumn', 'greenColumn']
.filter(f => f !== field)
.forEach(f => {
if (inputs[f].value === column) {
inputs[f].set(null) // TODO: This is not having any effect
}
})
}
setDefaults() {
const {inputs} = this.props
const {columns, rows} = this.state
const validMappings = getValidMappings(columns, rows)
this.setState({validMappings})
const defaults = getDefaults(columns, rows, validMappings)
Object.keys(defaults).forEach(field => inputs[field].set(defaults[field]))
}
onSelectFile(file) {
const {stream, inputs: {name, rows: rowsInput}} = this.props
name.set(file.name)
stream('LOAD_CSV_ROWS',
parseCsvFile$(file),
({columns, rows}) => {
rowsInput.set(rows)
this.setState({columns, rows})
}
)
}
save() {
const {inputs, recipeActionBuilder, activatable: {deactivate}} = this.props
const {
rows,
valueColumn,
labelColumn,
colorColumnType,
colorColumn,
redColumn,
greenColumn,
blueColumn
} = inputs
const entries = rows.value.map(row => ({
id: guid(),
color: colorColumnType.value === 'single'
? Color(trim(row[colorColumn.value])).hex()
: Color.rgb([
trim(row[redColumn.value]),
trim(row[greenColumn.value]),
trim(row[blueColumn.value])
]).hex(),
value: trim(row[valueColumn.value]),
label: trim(row[labelColumn.value])
}))
recipeActionBuilder('SET_IMPORTED_LEGEND_ENTRIES', {entries})
.set('ui.importedLegendEntries', entries)
.dispatch()
deactivate()
}
}
const policy = () => ({_: 'allow'})
const trim = value => _.isString(value) ? value.trim() : value
export const LegendImport = compose(
_LegendImport,
activatable({
id: 'legendImport',
policy,
alwaysAllow: true
}),
withRecipe(),
form({fields}),
)
export const getValidMappings = (columns, rows) => {
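// A column qualifies as the value column when every row holds a distinct integer,
// as the label column when every row holds a distinct non-empty string, as the
// color column when every row parses as a distinct CSS color, and as a
// red/green/blue channel when every row holds an integer.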
const toInts = column => {
return rows
.map(row => {
const value = row[column]
try {
return _.isInteger(value)
? value
: _.isInteger(parseInt(value))
? parseInt(value)
: null
} catch (_e) {
return false
}
})
.filter(i => _.isInteger(i))
}
const valueColumn = columns.filter(column =>
_.uniq(toInts(column)).length === rows.length
)
const labelColumn = columns.filter(column =>
_.uniq(rows
.map(row => _.isNaN(row[column])
? null
: _.isNil(row[column]) ? null : row[column].toString().trim()
)
.filter(value => value)
).length === rows.length
)
const colorColumn = columns.filter(column =>
_.uniq(rows
.map(row => {
try {
return Color(row[column].trim()).hex()
} catch(_e) {
return false
}
})
.filter(value => value)
).length === rows.length
)
const colorChannel = columns.filter(column =>
toInts(column).length === rows.length
)
return ({valueColumn, labelColumn, colorColumn, redColumn: colorChannel, greenColumn: colorChannel, blueColumn: colorChannel})
}
export const getDefaults = (columns, rows, validMappings) => {
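// Heuristic defaults: prefer a single color column when one qualifies, otherwise
// fall back to separate R/G/B channel columns, and pick the value/label/channel
// columns by name hints ('class'/'value'/'type', 'desc'/'label'/'name',
// 'red'/'green'/'blue'), removing each chosen column from the candidate pool.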
const mappings = _.cloneDeep(validMappings)
const selectedColumn = column => {
if (!column) return
Object.keys(mappings).forEach(key =>
mappings[key] = mappings[key].filter(c => c !== column)
)
}
const firstContaining = (columns, strings) =>
columns.find(column => strings.find(s => column.toLowerCase().includes(s.toLowerCase())))
const colorColumnType = mappings.colorColumn.length
? 'single'
: (mappings.redColumn.length >= 4 &&
mappings.greenColumn.length >= 4 &&
mappings.blueColumn.length >= 4)
? 'multiple'
: null
const colorColumn = mappings.colorColumn.length
? mappings.colorColumn[0]
: null
selectedColumn(colorColumn)
const valueColumn = mappings.valueColumn.length
? colorColumnType === 'single'
? mappings.valueColumn[0]
: firstContaining(mappings.valueColumn, ['class', 'value', 'type'])
: null
selectedColumn(valueColumn)
const labelColumn = mappings.labelColumn.length
? firstContaining(mappings.labelColumn, ['desc', 'label', 'name'])
: null
selectedColumn(labelColumn)
const redColumn = mappings.redColumn.length
? firstContaining(mappings.redColumn, ['red'])
: null
selectedColumn(redColumn)
const greenColumn = mappings.greenColumn.length
? firstContaining(mappings.greenColumn, ['green'])
: null
selectedColumn(greenColumn)
const blueColumn = mappings.blueColumn.length
? firstContaining(mappings.blueColumn, ['blue'])
: null
selectedColumn(blueColumn)
return _.transform({
valueColumn,
labelColumn,
colorColumnType,
colorColumn,
redColumn,
greenColumn,
blueColumn
}, (defaults, value, key) => {
if (value) {
defaults[key] = value
}
return defaults
})
}<|fim▁end|> | ? this.renderMapping('colorColumn')
: (
<Layout type='horizontal-nowrap'> |
<|file_name|>vtgate_cursor.py<|end_file_name|><|fim▁begin|># Copyright 2012, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""VTGateCursor, and StreamVTGateCursor."""
import itertools
import operator
import re
from vtdb import base_cursor
from vtdb import dbexceptions
write_sql_pattern = re.compile(r'\s*(insert|update|delete)', re.IGNORECASE)
def ascii_lower(string):
"""Lower-case, but only in the ASCII range."""
return string.encode('utf8').lower().decode('utf8')
class VTGateCursorMixin(object):
def connection_list(self):
return [self._conn]
def is_writable(self):
return self._writable
class VTGateCursor(base_cursor.BaseListCursor, VTGateCursorMixin):
"""A cursor for execute statements to VTGate.
Results are stored as a list.
"""
def __init__(
self, connection, tablet_type, keyspace=None,
shards=None, keyspace_ids=None, keyranges=None,
writable=False, as_transaction=False, single_db=False,
twopc=False):
"""Init VTGateCursor.
Args:
connection: A PEP0249 connection object.
tablet_type: Str tablet_type.
keyspace: Str keyspace or None if batch API will be used.
shards: List of strings.
keyspace_ids: Struct('!Q').packed keyspace IDs.
keyranges: Str keyranges.
writable: True if writable.
as_transaction: True if an executemany call is its own transaction.
single_db: True if single db transaction is needed.
twopc: True if 2-phase commit is needed.
"""
super(VTGateCursor, self).__init__(single_db=single_db, twopc=twopc)
self._conn = connection
self._writable = writable
self.description = None
self.index = None
self.keyspace = keyspace
self.shards = shards
self.keyspace_ids = keyspace_ids
self.keyranges = keyranges
self.lastrowid = None
self.results = None
self.routing = None
self.rowcount = 0
self.tablet_type = tablet_type
self.as_transaction = as_transaction
self._clear_batch_state()
# pass kwargs here in case higher level APIs need to push more data through
# for instance, a key value for shard mapping
def execute(self, sql, bind_variables, **kwargs):
"""Perform a query, return the number of rows affected."""
self._clear_list_state()
self._clear_batch_state()
if self._handle_transaction_sql(sql):
return
entity_keyspace_id_map = kwargs.pop('entity_keyspace_id_map', None)
entity_column_name = kwargs.pop('entity_column_name', None)
write_query = bool(write_sql_pattern.match(sql))
# NOTE: This check may also be done at higher layers but adding it
# here for completeness.
if write_query:
if not self.is_writable():
raise dbexceptions.ProgrammingError('DML on a non-writable cursor', sql)
if entity_keyspace_id_map:
raise dbexceptions.ProgrammingError(
'entity_keyspace_id_map is not allowed for write queries')
# FIXME(alainjobart): the entity_keyspace_id_map should be in the
# cursor, same as keyspace_ids, shards, keyranges, to avoid this hack.
if entity_keyspace_id_map:
shards = None
keyspace_ids = None
keyranges = None
else:
shards = self.shards
keyspace_ids = self.keyspace_ids
keyranges = self.keyranges
self.results, self.rowcount, self.lastrowid, self.description = (
self.connection._execute( # pylint: disable=protected-access
sql,
bind_variables,
tablet_type=self.tablet_type,
keyspace_name=self.keyspace,
shards=shards,
keyspace_ids=keyspace_ids,
keyranges=keyranges,
entity_keyspace_id_map=entity_keyspace_id_map,
entity_column_name=entity_column_name,
not_in_transaction=not self.is_writable(),
effective_caller_id=self.effective_caller_id,
**kwargs))
return self.rowcount
def fetch_aggregate_function(self, func):
return func(row[0] for row in self.fetchall())
def fetch_aggregate(self, order_by_columns, limit):
"""Fetch from many shards, sort, then remove sort columns.
A scatter query may return up to limit rows. Sort all results
manually to order them, and return the first rows.
This is a special-use function.
Args:
order_by_columns: The ORDER BY clause. Each element is either a
column, [column, 'ASC'], or [column, 'DESC'].
limit: Int limit.
Returns:
Smallest rows, with up to limit items. First len(order_by_columns)
columns are stripped.
"""
sort_columns = []
desc_columns = []
for order_clause in order_by_columns:
if isinstance(order_clause, (tuple, list)):
sort_columns.append(order_clause[0])
if ascii_lower(order_clause[1]) == 'desc':
desc_columns.append(order_clause[0])
else:
sort_columns.append(order_clause)
# sort the rows and then trim off the prepended sort columns
if sort_columns:
sorted_rows = list(sort_row_list_by_columns(
self.fetchall(), sort_columns, desc_columns))[:limit]
else:<|fim▁hole|> def _clear_batch_state(self):
"""Clear state that allows traversal to next query's results."""
self.result_sets = []
self.result_set_index = None
def close(self):
super(VTGateCursor, self).close()
self._clear_batch_state()
def executemany(self, sql, params_list, **kwargs):
"""Execute multiple statements in one batch.
This adds len(params_list) result_sets to self.result_sets. Each
result_set is a (results, rowcount, lastrowid, fields) tuple.
Each call overwrites the old result_sets. After execution, nextset()
is called to move the fetch state to the start of the first
result set.
Args:
sql: The sql text, with %(format)s-style tokens. May be None.
params_list: A list of the keyword params that are normally sent
to execute. Either the sql arg or params['sql'] must be defined.
**kwargs: passed as is to connection._execute_batch.
"""
if sql:
sql_list = [sql] * len(params_list)
else:
sql_list = [params.get('sql') for params in params_list]
bind_variables_list = [params['bind_variables'] for params in params_list]
keyspace_list = [params['keyspace'] for params in params_list]
keyspace_ids_list = [params.get('keyspace_ids') for params in params_list]
shards_list = [params.get('shards') for params in params_list]
self._clear_batch_state()
# Find other _execute_batch calls in test code.
self.result_sets = self.connection._execute_batch( # pylint: disable=protected-access
sql_list, bind_variables_list, keyspace_list, keyspace_ids_list,
shards_list,
self.tablet_type, self.as_transaction, self.effective_caller_id,
**kwargs)
self.nextset()
def nextset(self):
"""Move the fetch state to the start of the next result set.
self.(results, rowcount, lastrowid, description) will be set to
the next result_set, and the fetch-commands will work on this
result set.
Returns:
True if another result set exists, False if not.
"""
if self.result_set_index is None:
self.result_set_index = 0
else:
self.result_set_index += 1
self._clear_list_state()
if self.result_set_index < len(self.result_sets):
self.results, self.rowcount, self.lastrowid, self.description = (
self.result_sets[self.result_set_index])
return True
else:
self._clear_batch_state()
return None
class StreamVTGateCursor(base_cursor.BaseStreamCursor, VTGateCursorMixin):
"""A cursor for streaming statements to VTGate.
Results are returned as a generator.
"""
def __init__(
self, connection, tablet_type, keyspace=None,
shards=None, keyspace_ids=None,
keyranges=None, writable=False):
super(StreamVTGateCursor, self).__init__()
self._conn = connection
self._writable = writable
self.keyspace = keyspace
self.shards = shards
self.keyspace_ids = keyspace_ids
self.keyranges = keyranges
self.routing = None
self.tablet_type = tablet_type
def is_writable(self):
return self._writable
# pass kwargs here in case higher level APIs need to push more data through
# for instance, a key value for shard mapping
def execute(self, sql, bind_variables, **kwargs):
"""Start a streaming query."""
if self._writable:
raise dbexceptions.ProgrammingError('Streaming query cannot be writable')
self._clear_stream_state()
self.generator, self.description = self.connection._stream_execute( # pylint: disable=protected-access
sql,
bind_variables,
tablet_type=self.tablet_type,
keyspace_name=self.keyspace,
shards=self.shards,
keyspace_ids=self.keyspace_ids,
keyranges=self.keyranges,
not_in_transaction=not self.is_writable(),
effective_caller_id=self.effective_caller_id,
**kwargs)
return 0
def sort_row_list_by_columns(row_list, sort_columns=(), desc_columns=()):
"""Sort by leading sort columns by stable-sorting in reverse-index order."""
for column_index, column_name in reversed(
[x for x in enumerate(sort_columns)]):
og = operator.itemgetter(column_index)
if not isinstance(row_list, list):
row_list = sorted(
row_list, key=og, reverse=bool(column_name in desc_columns))
else:
row_list.sort(key=og, reverse=bool(column_name in desc_columns))
return row_list<|fim▁end|> | sorted_rows = itertools.islice(self.fetchall(), limit)
neutered_rows = [row[len(order_by_columns):] for row in sorted_rows]
return neutered_rows
|
<|file_name|>hello.rs<|end_file_name|><|fim▁begin|>use std::ops::Add;
fn main() {
println!("hello, world");
println!("> Primitives");
primitives();
println!("> Tuples");
tuples();
println!("> Arrays");
arrays();
println!("> Structs");
structs();
println!("> References");
references();
println!("> Enums");
enums();
}
fn primitives() {
// List of Rust primitive types
// https://doc.rust-lang.org/book/primitive-types.html
println!("1 + 2 = {}", 1 + 2);
println!("1 - 2 = {}", 1 - 2);
println!("true and true = {}", true && true);
let x: char = 'x';
println!("x = {}", x);
let y: f32 = 3.14;
println!("y = {}", y);
}
fn tuples() {
fn reverse(pair: (i32, i32)) -> (i32, i32) {
let (x, y) = pair;
(y, x)
}
let pair = (3, 2);
let reversed = reverse(pair);
println!("reversed: ({}, {})", reversed.0, reversed.1);
}
fn arrays() {
fn sum(slice: &[i32]) -> i32 {
let mut total = 0;
for i in 0..slice.len() {
total += slice[i]
}
total
}
let x = [1, 2, 3];
println!("total: {}", sum(&x));
let y = [1, 2, 3, 4, 5];
println!("total: {}", sum(&y));
}
fn structs() {
// derive Copy and Clone below so Point values are copied instead of moved.
#[derive(Copy, Clone)]
struct Point {
x: i32,
y: i32,
}
fn add(p1: Point, p2: Point) -> Point {
Point {
x: p1.x + p2.x,
y: p1.y + p2.y,
}
}
impl Add for Point {<|fim▁hole|> x: self.x + other.x,
y: self.y + other.y,
}
}
}
fn print_point(p: Point) {
println!("({} ,{})", p.x, p.y);
}
let p1 = Point { x: 1, y: 2 };
let p2 = Point { x: 4, y: 5 };
let p3 = add(p1, p2);
let p4 = p1 + p2;
print_point(p3);
print_point(p4);
}
fn references() {
// testing mutable reference.
let mut x: i32 = 5;
println!("x: {}", x);
fn increment(x: &mut i32) {
*x = *x + 1;
}
increment(&mut x);
println!("x: {}", x);
// testing immutable references
fn print_twice(x: &i32) {
println!("first: {}", x);
println!("second: {}", x);
}
print_twice(&3);
print_twice(&x);
/*
this code does not compile - taking multiple mutable references.
{
let y = &mut x;
let z = &mut x;
}
*/
/*
this code does not compile - taking mutable & immutable references (they form read-write-lock).
{
let y = & x;
let z = &mut x;
}
*/
}
fn enums() {
// standard enum.
enum Color {
RED,
GREEN,
BLACK,
};
fn categorize_color(c: Color) {
match c {
Color::RED => println!("color is red!"),
Color::GREEN => println!("color is green!"),
_ => println!("Unknown color."),
}
}
categorize_color(Color::RED);
categorize_color(Color::GREEN);
categorize_color(Color::BLACK);
// pattern matching enum.
enum Option {
None,
Some(i32),
}
fn app(o: Option, function: fn(i32) -> i32) -> Option {
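// `app` mirrors the standard library's Option::map: the function is applied
// only when a value is present; None is passed through unchanged.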
match o {
Option::None => Option::None,
Option::Some(x) => Option::Some(function(x)),
}
}
fn add_one(x: i32) -> i32 {
x + 1
}
fn print_value(x: i32) -> i32 {
println!("value: {}", x);
return 0;
}
app(app(Option::Some(10), add_one), print_value);
app(app(Option::None, add_one), print_value);
}<|fim▁end|> | // operator overloading!
type Output = Point;
fn add(self, other: Point) -> Point {
Point { |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright(C) 2014 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.<|fim▁hole|># weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .backend import RegionsjobBackend
__all__ = ['RegionsjobBackend']<|fim▁end|> | # |
<|file_name|>SpeakerExample.java<|end_file_name|><|fim▁begin|>package com.hzh.corejava.proxy;<|fim▁hole|> speaker.greeting();
}
}<|fim▁end|> |
public class SpeakerExample {
public static void main(String[] args) {
AiSpeaker speaker = (AiSpeaker) AuthenticationProxy.newInstance(new XiaoaiAiSpeeker()); |
<|file_name|>start-app.js<|end_file_name|><|fim▁begin|>import { run } from '@ember/runloop';
import { merge } from '@ember/polyfills';
import Application from '../../app';
import config from '../../config/environment';
export default function startApp(attrs) {
let application;
let attributes = merge({}, config.APP);
attributes = merge(attributes, attrs); // use defaults, but you can override;<|fim▁hole|> run(() => {
application = Application.create(attributes);
application.setupForTesting();
application.injectTestHelpers();
});
return application;
}<|fim▁end|> | |
<|file_name|>optparse.py<|end_file_name|><|fim▁begin|>"""optparse - a powerful, extensible, and easy-to-use option parser.
By Greg Ward <gward@python.net>
Originally distributed as Optik; see http://optik.sourceforge.net/ .
If you have problems with this module, please do not file bugs,
patches, or feature requests with Python; instead, use Optik's
SourceForge project page:
http://sourceforge.net/projects/optik
For support, use the [email protected] mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
"""
# Python developers: please do not make changes to this file, since
# it is automatically generated from the Optik source code.
__version__ = "1.5.1"
__all__ = ['Option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import string, re
import types
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 509 2006-04-20 00:58:24Z gward
# Id: option.py 509 2006-04-20 00:58:24Z gward
# Id: help.py 509 2006-04-20 00:58:24Z gward
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
from gettext import gettext
except ImportError:
def gettext(message):
return message
_ = gettext
class OptParseError (Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class OptionError (OptParseError):
"""
Raised if an Option instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class OptionConflictError (OptionError):
"""
Raised if conflicting options are added to an OptionParser.
"""
class OptionValueError (OptParseError):
"""
Raised if an invalid option value is encountered on the command
line.
"""
class BadOptionError (OptParseError):
"""
Raised if an invalid option is seen on the command line.
"""
def __init__(self, opt_str):
self.opt_str = opt_str
def __str__(self):
return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
"""
Raised if an ambiguous option is seen on the command line.
"""
def __init__(self, opt_str, possibilities):
BadOptionError.__init__(self, opt_str)
self.possibilities = possibilities
def __str__(self):
return (_("ambiguous option: %s (%s?)")
% (self.opt_str, ", ".join(self.possibilities)))
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
parser : OptionParser
the controlling OptionParser instance
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output (pass None to constructor for
this value to be taken from the $COLUMNS environment variable)
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
default_tag : str
text to replace with each option's default value, "%default"
by default. Set to false value to disable default value expansion.
option_strings : { Option : str }
maps Option instances to the snippet of help text explaining
the syntax of that option, e.g. "-h, --help" or
"-fFILE, --file=FILE"
_short_opt_fmt : str
format string controlling how short options with values are
printed in help text. Must be either "%s%s" ("-fFILE") or
"%s %s" ("-f FILE"), because those are the two syntaxes that
Optik supports.
_long_opt_fmt : str
similar but for long options; must be either "%s %s" ("--file FILE")
or "%s=%s" ("--file=FILE").
"""
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
self.help_position = self.max_help_position = max_help_position
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
def set_parser(self, parser):
self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
def indent(self):
self.current_indent += self.indent_increment
self.level += 1
def dedent(self):
self.current_indent -= self.indent_increment
assert self.current_indent >= 0, "Indent decreased below 0."
self.level -= 1
def format_usage(self, usage):
raise NotImplementedError, "subclasses must implement"
def format_heading(self, heading):
raise NotImplementedError, "subclasses must implement"
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = self.width - self.current_indent
indent = " "*self.current_indent
return fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
def expand_default(self, option):
if self.parser is None or not self.default_tag:
return option.help
default_value = self.parser.defaults.get(option.dest)
if default_value is NO_DEFAULT or default_value is None:
default_value = self.NO_DEFAULT_VALUE
return option.help.replace(self.default_tag, str(default_value))
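# e.g. (illustrative) help="number of workers [default: %default]" is rendered
# with the parser's current default value substituted for "%default".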
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = self.width - self.help_position
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
"""Format help with indented section bodies.
"""
def __init__(self,
indent_increment=2,
max_help_position=24,
width=None,
short_first=1):
HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return _("Usage: %s\n") % usage
def format_heading(self, heading):
return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
"""Format help with underlined section headers.
"""
def __init__(self,
indent_increment=0,
max_help_position=24,
width=None,
short_first=0):
HelpFormatter.__init__ (
self, indent_increment, max_help_position, width, short_first)
def format_usage(self, usage):
return "%s %s\n" % (self.format_heading(_("Usage")), usage)
def format_heading(self, heading):
return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
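# For example (illustrative, not part of the original Optik source):
#   _parse_num("0x1A", int) == 26, _parse_num("0b101", int) == 5,
#   _parse_num("017", int) == 15, _parse_num("42", int) == 42.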
def _parse_int(val):
return _parse_num(val, int)
def _parse_long(val):
return _parse_num(val, long)
_builtin_cvt = { "int" : (_parse_int, _("integer")),
"long" : (_parse_long, _("long integer")),
"float" : (float, _("floating-point")),
"complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
(cvt, what) = _builtin_cvt[option.type]
try:
return cvt(value)
except ValueError:
raise OptionValueError(
_("option %s: invalid %s value: %r") % (opt, what, value))
def check_choice(option, opt, value):
if value in option.choices:
return value
else:
choices = ", ".join(map(repr, option.choices))
raise OptionValueError(
_("option %s: invalid choice: %r (choose from %s)")
% (opt, value, choices))
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" value.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
"""
Instance attributes:
_short_opts : [string]
_long_opts : [string]
action : string
type : string
dest : string
default : any
nargs : int
const : any
choices : [string]
callback : function
callback_args : (any*)
callback_kwargs : { string : any }
help : string
metavar : string
"""
# The list of instance attributes that may be set through
# keyword args to the constructor.
ATTRS = ['action',
'type',
'dest',
'default',
'nargs',
'const',
'choices',
'callback',
'callback_args',
'callback_kwargs',
'help',
'metavar']
# The set of actions allowed by option parsers. Explicitly listed
# here so the constructor can validate its arguments.
ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count",
"callback",
"help",
"version")
# The set of actions that involve storing a value somewhere;
# also listed just for constructor argument validation. (If
# the action is one of these, there must be a destination.)
STORE_ACTIONS = ("store",
"store_const",
"store_true",
"store_false",
"append",
"append_const",
"count")
# The set of actions for which it makes sense to supply a value
# type, ie. which may consume an argument from the command line.
TYPED_ACTIONS = ("store",
"append",
"callback")
# The set of actions which *require* a value type, ie. that
# always consume an argument from the command line.
ALWAYS_TYPED_ACTIONS = ("store",
"append")
# The set of actions which take a 'const' attribute.
CONST_ACTIONS = ("store_const",
"append_const")
# The set of known types for option parsers. Again, listed here for
# constructor argument validation.
TYPES = ("string", "int", "long", "float", "complex", "choice")
# Dictionary of argument checking functions, which convert and
# validate option arguments according to the option type.
#
# Signature of checking functions is:
# check(option : Option, opt : string, value : string) -> any
# where
# option is the Option instance calling the checker
# opt is the actual option seen on the command-line
# (eg. "-a", "--file")
# value is the option argument seen on the command-line
#
# The return value should be in the appropriate Python type
# for option.type -- eg. an integer if option.type == "int".
#
# If no checker is defined for a type, arguments will be
# unchecked and remain strings.
TYPE_CHECKER = { "int" : check_builtin,
"long" : check_builtin,
"float" : check_builtin,
"complex": check_builtin,
"choice" : check_choice,
}
# CHECK_METHODS is a list of unbound method objects; they are called
# by the constructor, in order, after all attributes are
# initialized. The list is created and filled in later, after all
# the methods are actually defined. (I just put it here because I
# like to define and document all class attributes in the same
# place.) Subclasses that add another _check_*() method should
# define their own CHECK_METHODS list that adds their check method
# to those from this class.
CHECK_METHODS = None
# -- Constructor/initialization methods ----------------------------
def __init__(self, *opts, **attrs):
# Set _short_opts, _long_opts attrs from 'opts' tuple.
# Have to be set now, in case no option strings are supplied.
self._short_opts = []
self._long_opts = []
opts = self._check_opt_strings(opts)
self._set_opt_strings(opts)
# Set all other attrs (action, type, etc.) from 'attrs' dict
self._set_attrs(attrs)
# Check all the attributes we just set. There are lots of
# complicated interdependencies, but luckily they can be farmed
# out to the _check_*() methods listed in CHECK_METHODS -- which
# could be handy for subclasses! The one thing these all share
# is that they raise OptionError if they discover a problem.
for checker in self.CHECK_METHODS:
checker(self)
def _check_opt_strings(self, opts):
# Filter out None because early versions of Optik had exactly
# one short option and one long option, either of which
# could be None.
opts = filter(None, opts)
if not opts:
raise TypeError("at least one option string must be supplied")
return opts
def _set_opt_strings(self, opts):
for opt in opts:
if len(opt) < 2:
raise OptionError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise OptionError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise OptionError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def _set_attrs(self, attrs):
for attr in self.ATTRS:
if attrs.has_key(attr):
setattr(self, attr, attrs[attr])
del attrs[attr]
else:
if attr == 'default':
setattr(self, attr, NO_DEFAULT)
else:
setattr(self, attr, None)
if attrs:
raise OptionError(
"invalid keyword arguments: %s" % ", ".join(attrs.keys()),
self)
# -- Constructor validation methods --------------------------------
def _check_action(self):
if self.action is None:
self.action = "store"
elif self.action not in self.ACTIONS:
raise OptionError("invalid action: %r" % self.action, self)
def _check_type(self):
if self.type is None:
if self.action in self.ALWAYS_TYPED_ACTIONS:
if self.choices is not None:
# The "choices" attribute implies "choice" type.
self.type = "choice"
else:
# No type given? "string" is the most sensible default.
self.type = "string"
else:
# Allow type objects or builtin type conversion functions
# (int, str, etc.) as an alternative to their names. (The
# complicated check of __builtin__ is only necessary for
# Python 2.1 and earlier, and is short-circuited by the
# first check on modern Pythons.)
import __builtin__
if ( type(self.type) is types.TypeType or
(hasattr(self.type, "__name__") and
getattr(__builtin__, self.type.__name__, None) is self.type) ):
self.type = self.type.__name__
if self.type == "str":
self.type = "string"
if self.type not in self.TYPES:
raise OptionError("invalid option type: %r" % self.type, self)
if self.action not in self.TYPED_ACTIONS:
raise OptionError(
"must not supply a type for action %r" % self.action, self)
def _check_choice(self):
if self.type == "choice":
if self.choices is None:
raise OptionError(
"must supply a list of choices for type 'choice'", self)
elif type(self.choices) not in (types.TupleType, types.ListType):
raise OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1], self)
elif self.choices is not None:
raise OptionError(
"must not supply choices for type %r" % self.type, self)
def _check_dest(self):
# No destination given, and we need one for this action. The
# self.type check is for callbacks that take a value.
takes_value = (self.action in self.STORE_ACTIONS or
self.type is not None)
if self.dest is None and takes_value:
# Glean a destination from the first long option string,
# or from the first short option string if no long options.
if self._long_opts:
# eg. "--foo-bar" -> "foo_bar"
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
self.dest = self._short_opts[0][1]
def _check_const(self):
if self.action not in self.CONST_ACTIONS and self.const is not None:
raise OptionError(
"'const' must not be supplied for action %r" % self.action,
self)
def _check_nargs(self):
if self.action in self.TYPED_ACTIONS:
if self.nargs is None:
self.nargs = 1
elif self.nargs is not None:
raise OptionError(
"'nargs' must not be supplied for action %r" % self.action,
self)
def _check_callback(self):
if self.action == "callback":
if not callable(self.callback):
raise OptionError(
"callback not callable: %r" % self.callback, self)
if (self.callback_args is not None and
type(self.callback_args) is not types.TupleType):
raise OptionError(
"callback_args, if supplied, must be a tuple: not %r"
% self.callback_args, self)
if (self.callback_kwargs is not None and
type(self.callback_kwargs) is not types.DictType):
raise OptionError(
"callback_kwargs, if supplied, must be a dict: not %r"
% self.callback_kwargs, self)
else:
if self.callback is not None:
raise OptionError(
"callback supplied (%r) for non-callback option"
% self.callback, self)
if self.callback_args is not None:
raise OptionError(
"callback_args supplied for non-callback option", self)
if self.callback_kwargs is not None:
raise OptionError(
"callback_kwargs supplied for non-callback option", self)
CHECK_METHODS = [_check_action,
_check_type,
_check_choice,
_check_dest,
_check_const,
_check_nargs,
_check_callback]
# -- Miscellaneous methods -----------------------------------------
def __str__(self):
return "/".join(self._short_opts + self._long_opts)
__repr__ = _repr
def takes_value(self):
return self.type is not None
def get_opt_string(self):
if self._long_opts:
return self._long_opts[0]
else:
return self._short_opts[0]
# -- Processing methods --------------------------------------------
def check_value(self, opt, value):
checker = self.TYPE_CHECKER.get(self.type)
if checker is None:
return value
else:
return checker(self, opt, value)
def convert_value(self, opt, value):
if value is not None:
if self.nargs == 1:
return self.check_value(opt, value)
else:
return tuple([self.check_value(opt, v) for v in value])
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(
self.action, self.dest, opt, value, values, parser)
def take_action(self, action, dest, opt, value, values, parser):
if action == "store":
setattr(values, dest, value)
elif action == "store_const":
setattr(values, dest, self.const)
elif action == "store_true":
setattr(values, dest, True)
elif action == "store_false":
setattr(values, dest, False)
elif action == "append":
values.ensure_value(dest, []).append(value)
elif action == "append_const":
values.ensure_value(dest, []).append(self.const)
elif action == "count":
setattr(values, dest, values.ensure_value(dest, 0) + 1)
elif action == "callback":
args = self.callback_args or ()
kwargs = self.callback_kwargs or {}
self.callback(self, opt, value, parser, *args, **kwargs)
elif action == "help":
parser.print_help()
parser.exit()
elif action == "version":
parser.print_version()
parser.exit()
else:
raise RuntimeError, "unknown action %r" % self.action
return 1
# class Option
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
# For compatibility with Python 2.2
try:
True, False
except NameError:
(True, False) = (1, 0)
def isbasestring(x):
return isinstance(x, types.StringType) or isinstance(x, types.UnicodeType)
class Values:
def __init__(self, defaults=None):
if defaults:
for (attr, val) in defaults.items():
setattr(self, attr, val)
def __str__(self):
return str(self.__dict__)
__repr__ = _repr
def __cmp__(self, other):
if isinstance(other, Values):
return cmp(self.__dict__, other.__dict__)
elif isinstance(other, types.DictType):
return cmp(self.__dict__, other)
else:
return -1
def _update_careful(self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if dict.has_key(attr):
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval)
def _update_loose(self, dict):
"""
Update the option values from an arbitrary dictionary,
using all keys from the dictionary regardless of whether
they have a corresponding attribute in self or not.
"""
self.__dict__.update(dict)
def _update(self, dict, mode):
if mode == "careful":
self._update_careful(dict)
elif mode == "loose":
self._update_loose(dict)
else:
raise ValueError, "invalid update mode: %r" % mode
def read_module(self, modname, mode="careful"):
__import__(modname)
mod = sys.modules[modname]
self._update(vars(mod), mode)
def read_file(self, filename, mode="careful"):
vars = {}
execfile(filename, vars)
self._update(vars, mode)
def ensure_value(self, attr, value):
if not hasattr(self, attr) or getattr(self, attr) is None:
setattr(self, attr, value)
return getattr(self, attr)
class OptionContainer:
"""
Abstract base class.
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
option_list : [Option]
the list of Option objects contained by this OptionContainer
_short_opt : { string : Option }
dictionary mapping short option strings, eg. "-f" or "-X",
to the Option instances that implement them. If an Option
has multiple short option strings, it will appear in this
dictionary multiple times. [1]
_long_opt : { string : Option }
dictionary mapping long option strings, eg. "--file" or
"--exclude", to the Option instances that implement them.
Again, a given Option can occur multiple times in this
dictionary. [1]
defaults : { string : any }
dictionary mapping option destination names to default
values for each destination [1]
[1] These mappings are common to (shared by) all components of the
controlling OptionParser, where they are initially created.
"""
def __init__(self, option_class, conflict_handler, description):
# Initialize the option list and related data structures.
# This method must be provided by subclasses, and it must
# initialize at least the following instance attributes:
# option_list, _short_opt, _long_opt, defaults.
self._create_option_list()
self.option_class = option_class
self.set_conflict_handler(conflict_handler)
self.set_description(description)
def _create_option_mappings(self):
# For use by OptionParser constructor -- create the master
# option mappings used by this OptionParser and all
# OptionGroups that it owns.
self._short_opt = {} # single letter -> Option instance
self._long_opt = {} # long option -> Option instance
self.defaults = {} # maps option dest -> default value
def _share_option_mappings(self, parser):
# For use by OptionGroup constructor -- use shared option
# mappings from the OptionParser that owns this OptionGroup.
self._short_opt = parser._short_opt
self._long_opt = parser._long_opt
self.defaults = parser.defaults
def set_conflict_handler(self, handler):
if handler not in ("error", "resolve"):
raise ValueError, "invalid conflict_resolution value %r" % handler
self.conflict_handler = handler
def set_description(self, description):
self.description = description
def get_description(self):
return self.description
def destroy(self):
"""see OptionParser.destroy()."""
del self._short_opt
del self._long_opt
del self.defaults
# -- Option-adding methods -----------------------------------------
def _check_conflict(self, option):
conflict_opts = []
for opt in option._short_opts:
if self._short_opt.has_key(opt):
conflict_opts.append((opt, self._short_opt[opt]))
for opt in option._long_opts:
if self._long_opt.has_key(opt):
conflict_opts.append((opt, self._long_opt[opt]))
if conflict_opts:
handler = self.conflict_handler
if handler == "error":
raise OptionConflictError(
"conflicting option string(s): %s"
% ", ".join([co[0] for co in conflict_opts]),
option)
elif handler == "resolve":
for (opt, c_option) in conflict_opts:
if opt.startswith("--"):
c_option._long_opts.remove(opt)
del self._long_opt[opt]
else:
c_option._short_opts.remove(opt)
del self._short_opt[opt]
if not (c_option._short_opts or c_option._long_opts):
c_option.container.option_list.remove(c_option)
def add_option(self, *args, **kwargs):
"""add_option(Option)
add_option(opt_str, ..., kwarg=val, ...)
"""
if type(args[0]) is types.StringType:
option = self.option_class(*args, **kwargs)
elif len(args) == 1 and not kwargs:
option = args[0]
if not isinstance(option, Option):
raise TypeError, "not an Option instance: %r" % option
else:
raise TypeError, "invalid arguments"
self._check_conflict(option)
self.option_list.append(option)
option.container = self
for opt in option._short_opts:
self._short_opt[opt] = option
for opt in option._long_opts:
self._long_opt[opt] = option
if option.dest is not None: # option has a dest, we need a default
if option.default is not NO_DEFAULT:
self.defaults[option.dest] = option.default
elif not self.defaults.has_key(option.dest):
self.defaults[option.dest] = None
return option
def add_options(self, option_list):
for option in option_list:
self.add_option(option)
# -- Option query/removal methods ----------------------------------
def get_option(self, opt_str):
return (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
def has_option(self, opt_str):
return (self._short_opt.has_key(opt_str) or
self._long_opt.has_key(opt_str))
def remove_option(self, opt_str):
option = self._short_opt.get(opt_str)
if option is None:
option = self._long_opt.get(opt_str)
if option is None:
raise ValueError("no such option %r" % opt_str)
for opt in option._short_opts:
del self._short_opt[opt]
for opt in option._long_opts:
del self._long_opt[opt]
option.container.option_list.remove(option)
# -- Help-formatting methods ---------------------------------------
def format_option_help(self, formatter):
if not self.option_list:
return ""
result = []
for option in self.option_list:
if not option.help is SUPPRESS_HELP:
result.append(formatter.format_option(option))
return "".join(result)
def format_description(self, formatter):
return formatter.format_description(self.get_description())
def format_help(self, formatter):
result = []
if self.description:
result.append(self.format_description(formatter))
if self.option_list:
result.append(self.format_option_help(formatter))
return "\n".join(result)
class OptionGroup (OptionContainer):
def __init__(self, parser, title, description=None):
self.parser = parser
OptionContainer.__init__(
self, parser.option_class, parser.conflict_handler, description)
self.title = title
def _create_option_list(self):
self.option_list = []
self._share_option_mappings(self.parser)
def set_title(self, title):
self.title = title
def destroy(self):
"""see OptionParser.destroy()."""
OptionContainer.destroy(self)
del self.option_list
# -- Help-formatting methods ---------------------------------------
def format_help(self, formatter):
result = formatter.format_heading(self.title)
formatter.indent()
result += OptionContainer.format_help(self, formatter)
formatter.dedent()
return result
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
def __init__(self,
usage=None,
option_list=None,
option_class=Option,
version=None,
conflict_handler="error",
description=None,
formatter=None,
add_help_option=True,
prog=None,
epilog=None):
OptionContainer.__init__(
self, option_class, conflict_handler, description)
self.set_usage(usage)
self.prog = prog
self.version = version
self.allow_interspersed_args = True
self.process_default_values = True
if formatter is None:
formatter = IndentedHelpFormatter()
self.formatter = formatter
self.formatter.set_parser(self)
self.epilog = epilog
# Populate the option list; initial sources are the
# standard_option_list class attribute, the 'option_list'
# argument, and (if applicable) the _add_version_option() and
# _add_help_option() methods.
self._populate_option_list(option_list,
add_help=add_help_option)
self._init_parsing_state()
def destroy(self):
"""
Declare that you are done with this OptionParser. This cleans up
reference cycles so the OptionParser (and all objects referenced by
it) can be garbage-collected promptly. After calling destroy(), the
OptionParser is unusable.
"""
OptionContainer.destroy(self)
for group in self.option_groups:
group.destroy()
del self.option_list
del self.option_groups
del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
def _create_option_list(self):
self.option_list = []
self.option_groups = []
self._create_option_mappings()
def _add_help_option(self):
self.add_option("-h", "--help",
action="help",
help=_("show this help message and exit"))
def _add_version_option(self):
self.add_option("--version",
action="version",
help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
def enable_interspersed_args(self):
self.allow_interspersed_args = True
def disable_interspersed_args(self):
self.allow_interspersed_args = False
def set_process_default_values(self, process):
self.process_default_values = process
def set_default(self, dest, value):
self.defaults[dest] = value
def set_defaults(self, **kwargs):
self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
def get_default_values(self):
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return Values(self.defaults)
defaults = self.defaults.copy()
for option in self._get_all_options():
default = defaults.get(option.dest)
if isbasestring(default):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return Values(defaults)
# -- OptionGroup methods -------------------------------------------
def add_option_group(self, *args, **kwargs):
# XXX lots of overlap with OptionContainer.add_option()
if type(args[0]) is types.StringType:
group = OptionGroup(self, *args, **kwargs)
elif len(args) == 1 and not kwargs:
group = args[0]
if not isinstance(group, OptionGroup):
raise TypeError, "not an OptionGroup instance: %r" % group
if group.parser is not self:
raise ValueError, "invalid OptionGroup (wrong parser)"
else:
raise TypeError, "invalid arguments"
self.option_groups.append(group)
return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
        (values, args) where 'values' is a Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError), err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
def check_values(self, values, args):
"""
check_values(values : Values, args : [string])
-> (values : Values, args : [string])
Check that the supplied option values and leftover arguments are
valid. Returns the option values and leftover arguments
(possibly adjusted, possibly completely new -- whatever you
like). Default implementation just returns the passed-in
values; subclasses may override as desired.
"""
return (values, args)
def _process_args(self, largs, rargs, values):
"""_process_args(largs : [string],
rargs : [string],
values : Values)
Process command-line arguments and populate 'values', consuming
options and arguments from 'rargs'. If 'allow_interspersed_args' is
false, stop at the first non-option argument. If true, accumulate any
interspersed non-option arguments in 'largs'.
"""
while rargs:
arg = rargs[0]
# We handle bare "--" explicitly, and bare "-" is handled by the
# standard arg handler since the short arg case ensures that the
# len of the opt string is greater than 1.
if arg == "--":
del rargs[0]
return
elif arg[0:2] == "--":
# process a single long option (possibly with value(s))
self._process_long_opt(rargs, values)
elif arg[:1] == "-" and len(arg) > 1:
# process a cluster of short options (possibly with
# value(s) for the last one only)
self._process_short_opts(rargs, values)
elif self.allow_interspersed_args:
largs.append(arg)
del rargs[0]
else:
return # stop now, leave this arg in rargs
# Say this is the original argument list:
# [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
# ^
# (we are about to process arg(i)).
#
# Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
# [arg0, ..., arg(i-1)] (any options and their arguments will have
# been removed from largs).
#
# The while loop will usually consume 1 or more arguments per pass.
# If it consumes 1 (eg. arg is an option that takes no arguments),
# then after _process_arg() is done the situation is:
#
# largs = subset of [arg0, ..., arg(i)]
# rargs = [arg(i+1), ..., arg(N-1)]
#
# If allow_interspersed_args is false, largs will always be
# *empty* -- still a subset of [arg0, ..., arg(i-1)], but
# not a very interesting subset!
def _match_long_opt(self, opt):
"""_match_long_opt(opt : string) -> string
Determine which long option string 'opt' matches, ie. which one
        it is an unambiguous abbreviation for.  Raises BadOptionError if
'opt' doesn't unambiguously match any long option string.
"""
return _match_abbrev(opt, self._long_opt)
def _process_long_opt(self, rargs, values):
arg = rargs.pop(0)
# Value explicitly attached to arg? Pretend it's the next
# argument.
if "=" in arg:
(opt, next_arg) = arg.split("=", 1)
rargs.insert(0, next_arg)
had_explicit_value = True
else:
opt = arg
had_explicit_value = False
opt = self._match_long_opt(opt)
option = self._long_opt[opt]
if option.takes_value():
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
elif had_explicit_value:
self.error(_("%s option does not take a value") % opt)
else:
value = None
option.process(opt, value, values, self)
def _process_short_opts(self, rargs, values):
arg = rargs.pop(0)
stop = False
i = 1
for ch in arg[1:]:
opt = "-" + ch
option = self._short_opt.get(opt)
i += 1 # we have consumed a character
if not option:
raise BadOptionError(opt)
if option.takes_value():
# Any characters left in arg? Pretend they're the
# next arg, and stop consuming characters of arg.
if i < len(arg):
rargs.insert(0, arg[i:])
stop = True
nargs = option.nargs
if len(rargs) < nargs:
if nargs == 1:
self.error(_("%s option requires an argument") % opt)
else:
self.error(_("%s option requires %d arguments")
% (opt, nargs))
elif nargs == 1:
value = rargs.pop(0)
else:
value = tuple(rargs[0:nargs])
del rargs[0:nargs]
else: # option doesn't take a value
value = None
option.process(opt, value, values, self)
if stop:
break
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
def exit(self, status=0, msg=None):
if msg:
sys.stderr.write(msg)
sys.exit(status)
def error(self, msg):
"""error(msg : string)
Print a usage message incorporating 'msg' to stderr and exit.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(sys.stderr)
self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
def print_usage(self, file=None):
"""print_usage(file : file = stdout)
Print the usage message for the current program (self.usage) to
        'file' (default stdout).  Any occurrence of the string "%prog" in
self.usage is replaced with the name of the current program
(basename of sys.argv[0]). Does nothing if self.usage is empty
or not defined.
"""
if self.usage:
print >>file, self.get_usage()
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
def print_version(self, file=None):
"""print_version(file : file = stdout)
Print the version message for this program (self.version) to
        'file' (default stdout).  As with print_usage(), any occurrence
of "%prog" in self.version is replaced by the current program's
name. Does nothing if self.version is empty or undefined.
"""
if self.version:
print >>file, self.get_version()
def format_option_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
formatter.store_option_strings(self)
result = []
result.append(formatter.format_heading(_("Options")))
formatter.indent()
if self.option_list:
result.append(OptionContainer.format_option_help(self, formatter))
result.append("\n")
for group in self.option_groups:
result.append(group.format_help(formatter))
result.append("\n")
formatter.dedent()
# Drop the last "\n", or the header if no options or option groups:
return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
def format_help(self, formatter=None):
if formatter is None:
formatter = self.formatter
result = []
if self.usage:
result.append(self.get_usage() + "\n")
if self.description:
result.append(self.format_description(formatter) + "\n")
result.append(self.format_option_help(formatter))
result.append(self.format_epilog(formatter))
return "".join(result)
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
file.write(self.format_help())
# class OptionParser
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'words', raise BadOptionError.
"""
# Is there an exact match?
if wordmap.has_key(s):
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
raise AmbiguousOptionError(s, possibilities)
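# Behaviour sketch: given wordmap keys {"--verbose": ..., "--version": ...},
# _match_abbrev("--verb") returns "--verbose", an exact key such as "--version" is
# returned unchanged, "--ver" raises AmbiguousOptionError (it abbreviates both keys),
# and a string matching no key raises BadOptionError.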
# Some day, there might be many Option classes. As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
make_option = Option
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
"""
whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))
unicode_whitespace_trans = {}
uspace = ord(u' ')
for x in map(ord, _whitespace):
unicode_whitespace_trans[x] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[%s]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
% string.lowercase)
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs()
if self.replace_whitespace:
if isinstance(text, types.StringType):
text = text.translate(self.whitespace_trans)
elif isinstance(text, types.UnicodeType):
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
        not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
"""
chunks = self.wordsep_re.split(text)
chunks = filter(None, chunks)
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.<|fim▁hole|> pat = self.sentence_end_re
while i < len(chunks)-1:
if chunks[i+1] == " " and pat.search(chunks[i]):
chunks[i+1] = " "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
space_left = max(width - cur_len, 1)
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
        # from a stack of chunks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
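# A rough illustration of the two helpers above (exact break points depend on the width):
#   wrap("The quick brown fox jumps over the lazy dog", width=15)
#   # -> ['The quick brown', 'fox jumps over', 'the lazy dog']
#   fill("The quick brown fox jumps over the lazy dog", width=15)
#   # -> 'The quick brown\nfox jumps over\nthe lazy dog'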
# -- Loosely related functionality -------------------------------------
def dedent(text):
"""dedent(text : string) -> string
    Remove any whitespace that can be uniformly removed from the left
of every line in `text`.
This can be used e.g. to make triple-quoted strings line up with
the left edge of screen/whatever, while still presenting it in the
source code in indented form.
For example:
def test():
# end first line with \ to avoid the empty line!
s = '''\
hello
world
'''
print repr(s) # prints ' hello\n world\n '
print repr(dedent(s)) # prints 'hello\n world\n'
"""
lines = text.expandtabs().split('\n')
margin = None
for line in lines:
content = line.lstrip()
if not content:
continue
indent = len(line) - len(content)
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if margin is not None and margin > 0:
for i in range(len(lines)):
lines[i] = lines[i][margin:]
return '\n'.join(lines)<|fim▁end|> | """
i = 0 |
<|file_name|>comparison.js<|end_file_name|><|fim▁begin|>$(document).ready(function(){
$('#datepickerDay1').hide();
$('#datepickerDay2').hide();
$('#datepickerMnd1').hide();
$('#datepickerMnd2').hide();
$('#datepickerYear1').hide();
$('#datepickerYear2').hide();
$('select').change(function(){
var index = $('select').val();
if(index == 0){ //day
$('#datepickerDay1').show();
$('#datepickerDay2').show();
$('#datepickerMnd1').hide();
$('#datepickerMnd2').hide();
$('#datepickerYear1').hide();
$('#datepickerYear2').hide();
localStorage.setItem('type','0');
}
else if(index == 1){ //month
$('#datepickerMnd1').show();
$('#datepickerMnd2').show();
$('#datepickerDay1').hide();
$('#datepickerDay2').hide();
$('#datepickerYear1').hide();
$('#datepickerYear2').hide();
localStorage.setItem('type','1');
}
else if(index == 2){ //year
$('#datepickerYear1').show();
$('#datepickerYear2').show();
$('#datepickerDay1').hide();
$('#datepickerDay2').hide();
$('#datepickerMnd1').hide();
$('#datepickerMnd2').hide();
localStorage.setItem('type','2');
}
});
$('#datepickerDay1').datepicker({
format: "yyyy-mm-dd",
weekStart: 1,
language: "no",
todayHighlight: true
});
$('#datepickerDay1').on('changeDate', function(ev){
$(this).datepicker('hide');
day1();
if ($('input[name=date2]').val() != "") {<|fim▁hole|> showChart2();
showChart3();
showChart4();
showChart5();
showChart6();
}
});
$('#datepickerDay2').datepicker({
format: "yyyy-mm-dd",
weekStart: 1,
language: "no",
todayHighlight: true
});
$('#datepickerDay2').on('changeDate', function(ev){
$(this).datepicker('hide');
day2();
if ($('input[name=date]').val() != "") {
showChart1();
showChart2();
showChart3();
showChart4();
showChart5();
showChart6();
}
});
$('#datepickerYear1').datepicker({
format: "yyyy",
weekStart: 1,
startView: 1,
minViewMode: 2,
language: "no",
todayHighlight: true
});
$('#datepickerYear1').on('changeDate', function(ev){
$(this).datepicker('hide');
year1();
if ($('input[name=date6]').val() != "") {
showChart1();
showChart2();
showChart3();
showChart4();
showChart5();
showChart6();
}
});
$('#datepickerYear2').datepicker({
format: "yyyy",
weekStart: 1,
startView: 1,
minViewMode: 2,
language: "no",
todayHighlight: true
});
$('#datepickerYear2').on('changeDate', function(ev){
$(this).datepicker('hide');
year2();
if ($('input[name=date5]').val() != "") {
showChart1();
showChart2();
showChart3();
showChart4();
showChart5();
showChart6();
}
});
$('#datepickerMnd1').datepicker({
format: "yyyy-mm",
weekStart: 1,
startView: 0,
minViewMode: 1,
language: "no",
todayHighlight: true
});
$('#datepickerMnd1').on('changeDate', function(ev){
$(this).datepicker('hide');
mnd1();
if ($('input[name=date4]').val() != "") {
showChart1();
showChart2();
showChart3();
showChart4();
showChart5();
showChart6();
}
});
$('#datepickerMnd2').datepicker({
format: "yyyy-mm",
weekStart: 1,
startView: 0,
minViewMode: 1,
language: "no",
todayHighlight: true
});
$('#datepickerMnd2').on('changeDate', function(ev){
$(this).datepicker('hide');
mnd2();
if ($('input[name=date3]').val() != "") {
showChart1();
showChart2();
showChart3();
showChart4();
showChart5();
showChart6();
}
});
$('#backBtn').on('click', function(ev){
window.location.replace("../pages/stats.php");
});
function day1(){
var day = $('input[name=date]').val();
var xmlhttp = new XMLHttpRequest();
xmlhttp.onreadystatechange = function(){
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("firstDate").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "../phpBackend/OrgStat/day.php?date=" + day, true);
xmlhttp.send();
}
function day2(){
var day = $('input[name=date2]').val();
var xmlhttp = new XMLHttpRequest();
xmlhttp.onreadystatechange = function(){
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("lastDate").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "../phpBackend/OrgStat/day.php?date=" + day, true);
xmlhttp.send();
}
function mnd1(){
var day = $('input[name=date3]').val() + '-01';
var xmlhttp = new XMLHttpRequest();
xmlhttp.onreadystatechange = function(){
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("firstDate").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "../phpBackend/OrgStat/month.php?date=" + day, true);
xmlhttp.send();
}
function mnd2(){
var day = $('input[name=date4]').val() + '-01';
var xmlhttp = new XMLHttpRequest();
xmlhttp.onreadystatechange = function(){
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("lastDate").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "../phpBackend/OrgStat/month.php?date=" + day, true);
xmlhttp.send();
}
function year1(){
var day = $('input[name=date5]').val();
var xmlhttp = new XMLHttpRequest();
xmlhttp.onreadystatechange = function(){
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("firstDate").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "../phpBackend/OrgStat/year.php?date=" + day, true);
xmlhttp.send();
}
function year2(){
var day = $('input[name=date6]').val();
var xmlhttp = new XMLHttpRequest();
xmlhttp.onreadystatechange = function(){
if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
document.getElementById("lastDate").innerHTML = xmlhttp.responseText;
}
}
xmlhttp.open("GET", "../phpBackend/OrgStat/year.php?date=" + day, true);
xmlhttp.send();
}
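    // showChart1()-showChart6() below share one pattern: draw a two-segment doughnut
    // chart, read the comparison mode saved under localStorage 'type' (0 = day,
    // 1 = month, 2 = year), pick the matching pair of date inputs, then run two nested
    // AJAX calls to doughnut.php and write both results into the chart segments and the
    // corresponding summary labels once the second response arrives.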
function showChart1(){
var data = [
{
value: 1,
color:"#1AA24C",
highlight: "#1AD24C",
label: "Valg 2"
},
{
value: 1,
color: "#000000",
highlight: "#333333",
label: "Valg 1"
}
]
var ctx = $('#statistikk1').get(0).getContext("2d");
var myDoughnut = new Chart(ctx).Doughnut(data,{
animation:true,
showTooltips: true,
percentageInnerCutout : 0,
segmentShowStroke : true
});
var res1;
var res2;
var x = localStorage.getItem('type');
if(x == 0){ //day
var date1 = $('input[name=date]').val();
var date2 = $('input[name=date2]').val();
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 1,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 1,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#followersFirstDate").text(res1);
$("#followersLastDate").text(res2);
},
});
},
});
}else if(x == 1){ //month
var date1 = $('input[name=date3]').val() + '-01';
var date2 = $('input[name=date4]').val() + '-01';
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 2,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 2,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#followersFirstDate").text(res1);
$("#followersLastDate").text(res2);
},
});
},
});
}else if(x == 2){ //year
var date1 = $('input[name=date5]').val();
var date2 = $('input[name=date6]').val();
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 3,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 3,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#followersFirstDate").text(res1);
$("#followersLastDate").text(res2);
},
});
},
});
}
}
function showChart2(){
var data = [
{
value: 1,
color:"#1AA24C",
highlight: "#1AD24C",
label: "Valg 2"
},
{
value: 1,
color: "#000000",
highlight: "#333333",
label: "Valg 1"
}
]
var ctx = $('#statistikk2').get(0).getContext("2d");
var myDoughnut = new Chart(ctx).Doughnut(data,{
animation:true,
showTooltips: true,
percentageInnerCutout : 0,
segmentShowStroke : true
});
var res1;
var res2;
var x = localStorage.getItem('type');
if(x == 0){ //day
var date1 = $('input[name=date]').val();
var date2 = $('input[name=date2]').val();
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 4,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 4,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#donationsFirstDate").text(res1);
$("#donationsLastDate").text(res2);
},
});
},
});
}else if(x == 1){ //month
var date1 = $('input[name=date3]').val() + '-01';
var date2 = $('input[name=date4]').val() + '-01';
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 5,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 5,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#donationsFirstDate").text(res1);
$("#donationsLastDate").text(res2);
},
});
},
});
}else if(x == 2){ //year
var date1 = $('input[name=date5]').val();
var date2 = $('input[name=date6]').val();
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 6,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 6,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#donationsFirstDate").text(res1);
$("#donationsLastDate").text(res2);
},
});
},
});
}
}
function showChart3(){
var data = [
{
value: 1,
color:"#1AA24C",
highlight: "#1AD24C",
label: "Valg 2"
},
{
value: 1,
color: "#000000",
highlight: "#333333",
label: "Valg 1"
}
]
var ctx = $('#statistikk3').get(0).getContext("2d");
var myDoughnut = new Chart(ctx).Doughnut(data,{
animation:true,
showTooltips: true,
percentageInnerCutout : 0,
segmentShowStroke : true
});
var res1;
var res2;
var x = localStorage.getItem('type');
if(x == 0){ //day
var date1 = $('input[name=date]').val();
var date2 = $('input[name=date2]').val();
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 7,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 7,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#moneyDonatedFirstDate").text(res1 + ",-");
$("#moneyDonatedLastDate").text(res2 + ",-");
},
});
},
});
}else if(x == 1){ //month
var date1 = $('input[name=date3]').val() + '-01';
var date2 = $('input[name=date4]').val() + '-01';
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 8,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 8,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#moneyDonatedFirstDate").text(res1 + ",-");
$("#moneyDonatedLastDate").text(res2 + ",-");
},
});
},
});
}else if(x == 2){ //year
var date1 = $('input[name=date5]').val();
var date2 = $('input[name=date6]').val();
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 9,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 9,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#moneyDonatedFirstDate").text(res1 + ",-");
$("#moneyDonatedLastDate").text(res2 + ",-");
},
});
},
});
}
}
function showChart4(){
var data = [
{
value: 1,
color:"#1AA24C",
highlight: "#1AD24C",
label: "Valg 2"
},
{
value: 1,
color: "#000000",
highlight: "#333333",
label: "Valg 1"
}
]
var ctx = $('#statistikk4').get(0).getContext("2d");
var myDoughnut = new Chart(ctx).Doughnut(data,{
animation:true,
showTooltips: true,
percentageInnerCutout : 0,
segmentShowStroke : true
});
var res1;
var res2;
var x = localStorage.getItem('type');
if(x == 0){ //day
var date1 = $('input[name=date]').val();
var date2 = $('input[name=date2]').val();
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 10,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 10,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#averageDonationFirstDate").text(res1 + ",-");
$("#averageDonationLastDate").text(res2 + ",-");
},
});
},
});
}else if(x == 1){ //month
var date1 = $('input[name=date3]').val() + '-01';
var date2 = $('input[name=date4]').val() + '-01';
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 11,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 11,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#averageDonationFirstDate").text(res1 + ",-");
$("#averageDonationLastDate").text(res2 + ",-");
},
});
},
});
}else if(x == 2){ //year
var date1 = $('input[name=date5]').val();
var date2 = $('input[name=date6]').val();
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 12,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 12,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#averageDonationFirstDate").text(res1 + ",-");
$("#averageDonationLastDate").text(res2 + ",-");
},
});
},
});
}
}
function showChart5(){
var data = [
{
value: 1,
color:"#1AA24C",
highlight: "#1AD24C",
label: "Valg 2"
},
{
value: 1,
color: "#000000",
highlight: "#333333",
label: "Valg 1"
}
]
var ctx = $('#statistikk5').get(0).getContext("2d");
var myDoughnut = new Chart(ctx).Doughnut(data,{
animation:true,
showTooltips: true,
percentageInnerCutout : 0,
segmentShowStroke : true
});
var res1;
var res2;
var x = localStorage.getItem('type');
if(x == 0){ //day
var date1 = $('input[name=date]').val();
var date2 = $('input[name=date2]').val();
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 13,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 13,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#newsFirstDate").text(res1);
$("#newsLastDate").text(res2);
},
});
},
});
}else if(x == 1){ //month
var date1 = $('input[name=date3]').val() + '-01';
var date2 = $('input[name=date4]').val() + '-01';
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 14,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 14,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#newsFirstDate").text(res1);
$("#newsLastDate").text(res2);
},
});
},
});
}else if(x == 2){ //year
var date1 = $('input[name=date5]').val();
var date2 = $('input[name=date6]').val();
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 15,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 15,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#newsFirstDate").text(res1);
$("#newsLastDate").text(res2);
},
});
},
});
}
}
function showChart6(){
var data = [
{
value: 1,
color:"#1AA24C",
highlight: "#1AD24C",
label: "Valg 2"
},
{
value: 1,
color: "#000000",
highlight: "#333333",
label: "Valg 1"
}
]
var ctx = $('#statistikk6').get(0).getContext("2d");
var myDoughnut = new Chart(ctx).Doughnut(data,{
animation:true,
showTooltips: true,
percentageInnerCutout : 0,
segmentShowStroke : true
});
var res1;
var res2;
var x = localStorage.getItem('type');
if(x == 0){ //day
var date1 = $('input[name=date]').val();
var date2 = $('input[name=date2]').val();
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 16,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 16,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#prosjectsFirstDate").text(res1);
$("#prosjectsLastDate").text(res2);
},
});
},
});
}else if(x == 1){ //month
var date1 = $('input[name=date3]').val() + '-01';
var date2 = $('input[name=date4]').val() + '-01';
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 17,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 17,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#prosjectsFirstDate").text(res1);
$("#prosjectsLastDate").text(res2);
},
});
},
});
}else if(x == 2){ //year
var date1 = $('input[name=date5]').val();
var date2 = $('input[name=date6]').val();
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date1 + '&num=' + 18,
success: function(data) {
res1 = parseInt(data);
$.ajax({
url: '../phpBackend/doughnut.php?date='+ date2 + '&num=' + 18,
success: function(data) {
res2 = parseInt(data);
myDoughnut.segments[0].value = res2;
myDoughnut.segments[1].value = res1;
myDoughnut.update();
$("#prosjectsFirstDate").text(res1);
$("#prosjectsLastDate").text(res2);
},
});
},
});
}
}
});<|fim▁end|> | showChart1(); |
<|file_name|>method-self-arg-2.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test method calls with self as an argument cannot subvert borrow checking.
struct Foo;
impl Foo {
fn bar(&self) {}
fn baz(&mut self) {}<|fim▁hole|> let y = &mut x;
Foo::bar(&x); //~ERROR cannot borrow `x`
let x = Foo;
Foo::baz(&x); //~ERROR cannot borrow immutable dereference of `&`-pointer as mutable
}<|fim▁end|> | }
fn main() {
let mut x = Foo; |
<|file_name|>solution-0.cpp<|end_file_name|><|fim▁begin|>class Solution {
public:<|fim▁hole|>
for (int i = 1; i <= amount; ++i) {
dp[i] = -1;
}
dp[0] = 0;
for (int i = 0; i <= amount; ++i) {
for (int j = 0; j < n; ++j) {
int val = coins[j] + i;
if (val <= amount && dp[i] != -1) {
if (dp[val] == -1) {
dp[val] = dp[i] + 1;
} else {
dp[val] = min(dp[val], dp[i] + 1);
}
}
}
// for (int k = 0; k <= amount; ++k) {
// printf("%d ", dp[k]);
// }
// printf("\n");
}
return dp[amount];
}
};<|fim▁end|> | int coinChange(vector<int>& coins, int amount) {
const int n = coins.size();
int dp[amount + 1]; |
<|file_name|>script07_use_gene_list.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import os
import sys
import glob
import argparse
from datetime import datetime
import platform
if platform.system().lower() == 'darwin':
os.environ['PYTHONPATH'] = '%s/osx_libs:$PYTHONPATH' % os.getcwd()
import wormtable as wt
################################################################################
# This script allows the user to filter variants in a vcf file based on one or
# more genes of interest. Genes can be provided as a comma-separated string or
# as a text file, with one gene per line. The query can be either positive (keep
# variants annotated to any of the input genes) or negative (keep variants not
# annotated to any of the input genes).
################################################################################
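# Example invocation (the folder, output file, field name and gene symbols below are
# placeholders for illustration, not values bundled with this script):
#   python script07_use_gene_list.py -i wormtables_dir -o filtered_variants.txt \
#     -g BRCA1,TP53 -f INFO_GENE -n False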
def parse_args():
"""
Parse the input arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', dest = 'inp_folder', required = True,
help = 'input folder containing the several wormtables')
parser.add_argument('-o', dest = 'out_file', required = True,
help = 'output file [.txt]')
parser.add_argument('-g', dest = 'genes_to_query', required = True,
help = 'genes of interest [comma-sep. string or file ' +
'path]')
parser.add_argument('-f', dest = 'field_name', required = True,
help = 'field where gene names have to be searched')
parser.add_argument('-n', dest = 'negative_query', required = True,
help = 'is this a negative query? [True or False]')
parser.add_argument('-p', dest = 'previous_results', required = False,
help = 'previously saved results from another query ' +
'[.txt]')
args = parser.parse_args()
return args
def check_input_file(folder_name):
"""
Make sure that the input file's path is properly defined.
"""
if not os.path.exists(folder_name):
sys.stderr.write("\nFolder named '" + folder_name + "' does not exist.\n")
sys.exit()
return folder_name
def check_output_file(file_name):
"""
Make sure that the input file's path does not already exist.
"""
if os.path.exists(file_name):
sys.stderr.write("\nFile named '" + file_name + "' already exists.\n")
sys.exit()
return file_name
<|fim▁hole|> Store all input gene names in a set. If the path of genes_to_query does not
exist, it will treat genes_to_query as a string.
"""
genes = set()
# genes_to_query is a text file
if os.path.exists(genes_to_query):
f = open(genes_to_query)
for line in f:
genes.add(line.strip('\n'))
f.close()
# genes_to_query is a comma-separated string
else:
genes = set(genes_to_query.split(','))
return genes
def get_variants_assoc_to_gene_set_from_previous_results(inp_folder, genes,
field_name, negative_query, previous_results):
"""
Open the field_name wormtable (assumed to be named 'inp_folder/field_name.wt')
within inp_folder and return a set of all row IDs where at least one gene from
genes is found. Use ids from previous_results as starting point to further
filter the data and to make it faster.
If negative_query is True, only variants NOT containing any of the input genes
in field_name will be returned; if False, viceversa (positive query is run).
"""
# extract row IDs to check from previous_results (which is a file path) and
# store them in a set; NOTE: it assumes previous_results has a 1-line header,
# is tab-separated and row_id is the left-most field!
ids_to_check = set()
f = open(previous_results)
header = True
for line in f:
if header:
header = False
else:
ids_to_check.add(int(line.split('\t')[0]))
f.close()
# open wormtable for the field of interest
table = wt.open_table(inp_folder + '/' + field_name + '.wt',
db_cache_size='4G')
index = table.open_index('row_id')
all_ids = set()
pos_ids = set()
# NOTE: it assumes the wormtable has only two columns: 'row_id' and field_name
row_id_idx = 0
field_name_idx = 1
for row in index.cursor(['row_id', field_name]):
if row[row_id_idx] in ids_to_check:
all_ids.add(row[row_id_idx])
for value in row[field_name_idx].split(','):
for gene in genes:
if value.find(gene) != -1:
pos_ids.add(row[row_id_idx])
break
# close table and index
table.close()
index.close()
# if "negative_query" is True, return all row IDs which are not in "pos_ids"
if negative_query == 'True':
neg_ids = all_ids - pos_ids
return neg_ids
elif negative_query == 'False':
return pos_ids
def get_variants_assoc_to_gene_set(inp_folder, genes, field_name,
negative_query):
"""
Open the field_name wormtable (assumed to be named 'inp_folder/field_name.wt')
within inp_folder and return a set of all row IDs where at least one gene from
genes is found.
If negative_query is True, only variants NOT containing any of the input genes
  in field_name will be returned; if False, vice versa (positive query is run).
"""
# open wormtable for the field of interest
table = wt.open_table(inp_folder + '/' + field_name + '.wt',
db_cache_size='4G')
all_ids = set()
pos_ids = set()
# NOTE: it assumes the wormtable has only two columns: 'row_id' and field_name
row_id_idx = 0
field_name_idx = 1
for row in table.cursor(['row_id', field_name]):
all_ids.add(row[row_id_idx])
for value in row[field_name_idx].split(','):
for gene in genes:
if value.find(gene) != -1:
pos_ids.add(row[row_id_idx])
break
# close table
table.close()
# if "negative_query" is True, return all row IDs which are not in "pos_ids"
if negative_query == 'True':
neg_ids = all_ids - pos_ids
return neg_ids
elif negative_query == 'False':
return pos_ids
def retrieve_variants_by_rowid(inp_folder, ids, out_file):
"""
Use the row IDs in ids to query the complete wormtable (containing all variant
fields) and return all the information about the filtered variants.
"""
# open table and load indices
table = wt.open_table(inp_folder + '/schema.wt', db_cache_size='4G')
index = table.open_index('row_id')
# retrieve the rows using the 'row_id' field and write the results in out_file
col_names = [col.get_name() for col in table.columns()]
row_id_idx = col_names.index('row_id')
out = open(out_file, 'w')
out.write('\t'.join(col_names) + '\n')
for row in index.cursor(col_names):
if row[row_id_idx] in ids:
to_write = list()
for value in row:
try: # value is a number (int or float)
to_write.append(int(value))
except TypeError, e: # value is a tuple
if value is not None:
to_write.append(','.join([str(x) for x in value]))
else:
to_write.append(None)
except ValueError, e: # value is a string
to_write.append(value)
except:
to_write.append(None)
out.write('\t'.join([str(x) for x in to_write]) + '\n')
out.close()
# close table and index
table.close()
index.close()
return
def script07_api_call(i_folder, o_file, genes_to_query, field_name,
negative_query, previous_results = None):
"""
API call for web-based and other front-end services, to avoid a system call
and a new Python process.
"""
t1 = datetime.now()
inp_folder = check_input_file(i_folder)
out_file = check_output_file(o_file)
negative_query = str(negative_query).lower()
if negative_query.startswith('t'):
negative_query = 'True'
else:
negative_query = 'False'
genes = store_genes(genes_to_query)
if previous_results != None:
ids = get_variants_assoc_to_gene_set_from_previous_results(inp_folder,
genes, field_name, negative_query, previous_results)
else:
ids = get_variants_assoc_to_gene_set(inp_folder, genes, field_name,
negative_query)
retrieve_variants_by_rowid(inp_folder, ids, out_file)
t2 = datetime.now()
sys.stderr.write('%s\n' % str(t2 - t1))
return
def main():
"""
Main function.
"""
args = parse_args()
script07_api_call(args.inp_folder, args.out_file, args.genes_to_query,
args.field_name, args.negative_query, args.previous_results)
if __name__ == '__main__':
main()<|fim▁end|> | def store_genes(genes_to_query):
""" |
<|file_name|>property_injection.test.ts<|end_file_name|><|fim▁begin|>/// <reference path="../globals.d.ts" />
import { expect } from "chai";
import "es6-symbol/implement";
import {
Container, injectable, inject,
named, tagged, multiInject,
unmanaged, optional
} from "../../src/inversify";
describe("Property Injection", () => {
it("Should be able to inject a property", () => {
let TYPES = {
Warrior: "Warrior",
Weapon: "Weapon"
};
interface Weapon {
name: string;
}
@injectable()
class Katana implements Weapon {
public name: string;
public constructor() {
this.name = "Katana";
}
}
interface Warrior {
name: string;
weapon: Weapon;
}
@injectable()
class Samurai implements Warrior {
public name: string;
@inject(TYPES.Weapon)
public weapon: Weapon;
public constructor() {
this.name = "Samurai";
}
}
let container = new Container();
container.bind<Warrior>(TYPES.Warrior).to(Samurai);
container.bind<Weapon>(TYPES.Weapon).to(Katana);
let warrior = container.get<Warrior>(TYPES.Warrior);
expect(warrior.name).to.eql("Samurai");
expect(warrior.weapon).not.to.eql(undefined);
expect(warrior.weapon.name).to.eql("Katana");
});
it("Should be able to inject a property combined with constructor injection", () => {
let TYPES = {
Warrior: "Warrior",
Weapon: "Weapon"
};
let TAGS = {
Primary: "Primary",
Secondary: "Secondary"
};
interface Weapon {
name: string;
}
@injectable()
class Katana implements Weapon {
public name: string;
public constructor() {
this.name = "Katana";
}
}
@injectable()
class Shuriken implements Weapon {
public name: string;
public constructor() {
this.name = "Shuriken";
}
}
interface Warrior {
name: string;
primaryWeapon: Weapon;
secondaryWeapon: Weapon;
}
@injectable()
class Samurai implements Warrior {
public name: string;
public primaryWeapon: Weapon;
@inject(TYPES.Weapon)
@named(TAGS.Secondary)
public secondaryWeapon: Weapon;
public constructor(
@inject(TYPES.Weapon) @named(TAGS.Primary) weapon: Weapon
) {
this.name = "Samurai";
this.primaryWeapon = weapon;
}
}
let container = new Container();
container.bind<Warrior>(TYPES.Warrior).to(Samurai);
container.bind<Weapon>(TYPES.Weapon).to(Katana).whenTargetNamed(TAGS.Primary);
container.bind<Weapon>(TYPES.Weapon).to(Shuriken).whenTargetNamed(TAGS.Secondary);
let warrior = container.get<Warrior>(TYPES.Warrior);
expect(warrior.name).to.eql("Samurai");
expect(warrior.primaryWeapon).not.to.eql(undefined);
expect(warrior.primaryWeapon.name).to.eql("Katana");
expect(warrior.secondaryWeapon).not.to.eql(undefined);
expect(warrior.secondaryWeapon.name).to.eql("Shuriken");
});
it("Should be able to inject a named property", () => {
let TYPES = {
Warrior: "Warrior",
Weapon: "Weapon"
};
let TAGS = {
Primary: "Primary",
Secondary: "Secondary"
};
interface Weapon {
name: string;
}
@injectable()
class Katana implements Weapon {
public name: string;
public constructor() {
this.name = "Katana";
}
}
@injectable()
class Shuriken implements Weapon {
public name: string;
public constructor() {
this.name = "Shuriken";
}
}
interface Warrior {
name: string;
primaryWeapon: Weapon;
secondaryWeapon: Weapon;
}
@injectable()
class Samurai implements Warrior {
public name: string;
@inject(TYPES.Weapon)
@named(TAGS.Primary)
public primaryWeapon: Weapon;
@inject(TYPES.Weapon)
@named(TAGS.Secondary)
public secondaryWeapon: Weapon;
public constructor() {
this.name = "Samurai";
}
}
let container = new Container();
container.bind<Warrior>(TYPES.Warrior).to(Samurai);
container.bind<Weapon>(TYPES.Weapon).to(Katana).whenTargetNamed(TAGS.Primary);
container.bind<Weapon>(TYPES.Weapon).to(Shuriken).whenTargetNamed(TAGS.Secondary);
let warrior = container.get<Warrior>(TYPES.Warrior);
expect(warrior.name).to.eql("Samurai");
expect(warrior.primaryWeapon).not.to.eql(undefined);
expect(warrior.primaryWeapon.name).to.eql("Katana");
expect(warrior.secondaryWeapon).not.to.eql(undefined);
expect(warrior.secondaryWeapon.name).to.eql("Shuriken");
});
it("Should be able to inject a tagged property", () => {
let TYPES = {
Warrior: "Warrior",
Weapon: "Weapon"
};
let TAGS = {
Primary: "Primary",
Priority: "Priority",
Secondary: "Secondary"
};
interface Weapon {
name: string;
}
@injectable()
class Katana implements Weapon {
public name: string;
public constructor() {
this.name = "Katana";
}
}
@injectable()
class Shuriken implements Weapon {
public name: string;
public constructor() {
this.name = "Shuriken";
}
}
interface Warrior {
name: string;
primaryWeapon: Weapon;
secondaryWeapon: Weapon;
}
@injectable()
class Samurai implements Warrior {
public name: string;
@inject(TYPES.Weapon)
@tagged(TAGS.Priority, TAGS.Primary)
public primaryWeapon: Weapon;
@inject(TYPES.Weapon)
@tagged(TAGS.Priority, TAGS.Secondary)
public secondaryWeapon: Weapon;
public constructor() {
this.name = "Samurai";
}
}
let container = new Container();
container.bind<Warrior>(TYPES.Warrior).to(Samurai);
container.bind<Weapon>(TYPES.Weapon).to(Katana).whenTargetTagged(TAGS.Priority, TAGS.Primary);
container.bind<Weapon>(TYPES.Weapon).to(Shuriken).whenTargetTagged(TAGS.Priority, TAGS.Secondary);
let warrior = container.get<Warrior>(TYPES.Warrior);
expect(warrior.name).to.eql("Samurai");
expect(warrior.primaryWeapon).not.to.eql(undefined);
expect(warrior.primaryWeapon.name).to.eql("Katana");
expect(warrior.secondaryWeapon).not.to.eql(undefined);
expect(warrior.secondaryWeapon.name).to.eql("Shuriken");
});
it("Should be able to multi-inject a property", () => {
let TYPES = {
Warrior: "Warrior",
Weapon: "Weapon"
};
interface Weapon {
name: string;
}
@injectable()
class Katana implements Weapon {
public name: string;
public constructor() {
this.name = "Katana";
}
}
@injectable()
class Shuriken implements Weapon {
public name: string;
public constructor() {
this.name = "Shuriken";
}
}
interface Warrior {
name: string;
weapons: Weapon[];
}
@injectable()
class Samurai implements Warrior {
public name: string;
@multiInject(TYPES.Weapon)
public weapons: Weapon[];
public constructor() {
this.name = "Samurai";
}
}
let container = new Container();
container.bind<Warrior>(TYPES.Warrior).to(Samurai);
container.bind<Weapon>(TYPES.Weapon).to(Katana);<|fim▁hole|> expect(warrior.weapons[0]).not.to.eql(undefined);
expect(warrior.weapons[0].name).to.eql("Katana");
expect(warrior.weapons[1]).not.to.eql(undefined);
expect(warrior.weapons[1].name).to.eql("Shuriken");
});
it("Should be able to inject a property in a base class", () => {
let TYPES = {
Warrior: "Warrior",
Weapon: "Weapon"
};
let TAGS = {
Primary: "Primary",
Priority: "Priority",
Secondary: "Secondary"
};
interface Weapon {
name: string;
}
@injectable()
class Katana implements Weapon {
public name: string;
public constructor() {
this.name = "Katana";
}
}
@injectable()
class Shuriken implements Weapon {
public name: string;
public constructor() {
this.name = "Shuriken";
}
}
interface Warrior {
name: string;
primaryWeapon: Weapon;
}
@injectable()
class BaseWarrior implements Warrior {
public name: string;
@inject(TYPES.Weapon)
@tagged(TAGS.Priority, TAGS.Primary)
public primaryWeapon: Weapon;
public constructor(@unmanaged() name: string) {
this.name = name;
}
}
@injectable()
class Samurai extends BaseWarrior {
@inject(TYPES.Weapon)
@tagged(TAGS.Priority, TAGS.Secondary)
public secondaryWeapon: Weapon;
public constructor() {
super("Samurai");
}
}
let container = new Container();
container.bind<Warrior>(TYPES.Warrior).to(Samurai);
container.bind<Weapon>(TYPES.Weapon).to(Katana).whenTargetTagged(TAGS.Priority, TAGS.Primary);
container.bind<Weapon>(TYPES.Weapon).to(Shuriken).whenTargetTagged(TAGS.Priority, TAGS.Secondary);
let samurai = container.get<Samurai>(TYPES.Warrior);
expect(samurai.name).to.eql("Samurai");
expect(samurai.secondaryWeapon).not.to.eql(undefined);
expect(samurai.secondaryWeapon.name).to.eql("Shuriken");
expect(samurai.primaryWeapon).not.to.eql(undefined);
expect(samurai.primaryWeapon.name).to.eql("Katana");
});
it("Should be able to flag a property injection as optional", () => {
let TYPES = {
Route: "Route",
Router: "Router"
};
interface Route {
name: string;
}
@injectable()
class Router {
@inject(TYPES.Route) @optional()
private route: Route;
public getRoute(): Route {
return this.route;
}
}
let container = new Container();
container.bind<Router>(TYPES.Router).to(Router);
let router1 = container.get<Router>(TYPES.Router);
expect(router1.getRoute()).to.eql(undefined);
container.bind<Route>(TYPES.Route).toConstantValue({ name: "route1" });
let router2 = container.get<Router>(TYPES.Router);
expect(router2.getRoute().name).to.eql("route1");
});
});<|fim▁end|> | container.bind<Weapon>(TYPES.Weapon).to(Shuriken);
let warrior = container.get<Warrior>(TYPES.Warrior);
expect(warrior.name).to.eql("Samurai"); |
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for redux-batched-actions 0.1
// Project: https://github.com/tshelburne/redux-batched-actions
// Definitions by: Chad Burggraf <https://github.com/ChadBurggraf>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
import { Action, Reducer } from 'redux';
export as namespace ReduxBatchedActions;
/**
* Batching action creator that creates a higher-order
* action from an array of actions.
*/<|fim▁hole|>
/**
* Creates a higher-order reducer that enables batching
* actions for the given reducer.
*/
export function enableBatching<S>(reducer: Reducer<S>): Reducer<S>;<|fim▁end|> | export function batchActions<A extends Action>(actions: A[]): Action; |
<|file_name|>Solution.java<|end_file_name|><|fim▁begin|>package com.javarush.task.task11.task1109;
/*
Like cats and dogs
*/
public class Solution {
public static void main(String[] args) {
Cat cat = new Cat("Vaska", 5);
Dog dog = new Dog("Sharik", 4);
cat.isDogNear(dog);
dog.isCatNear(cat);
}
public static class Cat {
private String name;
private int speed;
public Cat(String name, int speed) {
this.name = name;
this.speed = speed;
}
private String getName() {
return name;
}
private int getSpeed() {
return speed;
}
public boolean isDogNear(Dog dog) {
return this.speed > dog.getSpeed();
}
}
public static class Dog {
private String name;
private int speed;
public Dog(String name, int speed) {
this.name = name;
this.speed = speed;<|fim▁hole|> private String getName() {
return name;
}
private int getSpeed() {
return speed;
}
public boolean isCatNear(Cat cat) {
return this.speed > cat.getSpeed();
}
}
}<|fim▁end|> | }
|
<|file_name|>near_hundred.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
Given an int n, return True if it is within 10 of 100 or 200. Note: abs(num) computes the absolute value of a number.
near_hundred(93) == True
near_hundred(90) == True
near_hundred(89) == False<|fim▁hole|>
def near_hundred(n):
return 190 <= abs(n) <= 210 or 90 <= abs(n) <= 110
def test_function():
assert near_hundred(93) == True
assert near_hundred(90) == True
assert near_hundred(89) == False
assert near_hundred(110) == True
assert near_hundred(111) == False
assert near_hundred(121) == False
assert near_hundred(0) == False
assert near_hundred(5) == False
assert near_hundred(191) == True
assert near_hundred(189) == False
assert near_hundred(190) == True
assert near_hundred(200) == True
assert near_hundred(210) == True
assert near_hundred(211) == False
assert near_hundred(290) == False
if __name__ == '__main__':
test_function()<|fim▁end|> | """
|
<|file_name|>protover.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2016-2019, The Tor Project, Inc. */
// See LICENSE for licensing information */
extern crate protover;
use protover::errors::ProtoverError;
use protover::ProtoEntry;
use protover::ProtoverVote;
use protover::UnvalidatedProtoEntry;
#[test]
fn parse_protocol_with_single_proto_and_single_version() {
let _: ProtoEntry = "Cons=1".parse().unwrap();
}
#[test]
fn parse_protocol_with_single_protocol_and_multiple_versions() {
let _: ProtoEntry = "Cons=1-2".parse().unwrap();
}
#[test]
fn parse_protocol_with_different_single_protocol_and_single_version() {
let _: ProtoEntry = "HSDir=1".parse().unwrap();
}
#[test]
fn parse_protocol_with_single_protocol_and_supported_version() {
let _: ProtoEntry = "Desc=2".parse().unwrap();
}
#[test]
fn parse_protocol_with_two_protocols_and_single_version() {
let _: ProtoEntry = "Cons=1 HSDir=1".parse().unwrap();
}
#[test]
fn parse_protocol_with_single_protocol_and_two_sequential_versions() {
let _: ProtoEntry = "Desc=1-2".parse().unwrap();
}
#[test]
fn parse_protocol_with_single_protocol_and_protocol_range() {
let _: ProtoEntry = "Link=1-4".parse().unwrap();
}
#[test]
fn parse_protocol_with_single_protocol_and_protocol_set() {
let _: ProtoEntry = "Link=3-4 Desc=2".parse().unwrap();
}
#[test]
fn protocol_all_supported_with_single_protocol_and_protocol_set() {
let protocols: UnvalidatedProtoEntry = "Link=3-4 Desc=2".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
fn protocol_all_supported_with_two_values() {
let protocols: UnvalidatedProtoEntry = "Microdesc=1-2 Relay=2".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
fn protocol_all_supported_with_one_value() {
let protocols: UnvalidatedProtoEntry = "Microdesc=1-2".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
#[should_panic]
fn parse_protocol_unvalidated_with_empty() {
let _: UnvalidatedProtoEntry = "".parse().unwrap();
}
#[test]
#[should_panic]
fn parse_protocol_validated_with_empty() {
let _: UnvalidatedProtoEntry = "".parse().unwrap();
}
#[test]
fn protocol_all_supported_with_three_values() {
let protocols: UnvalidatedProtoEntry = "LinkAuth=1 Microdesc=1-2 Relay=2".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
fn protocol_all_supported_with_unsupported_protocol() {
let protocols: UnvalidatedProtoEntry = "Wombat=9".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_some());
assert_eq!("Wombat=9", &unsupported.unwrap().to_string());
}
#[test]
fn protocol_all_supported_with_unsupported_versions() {
let protocols: UnvalidatedProtoEntry = "Link=3-999".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_some());
assert_eq!("Link=6-999", &unsupported.unwrap().to_string());
}
#[test]
fn protocol_all_supported_with_unsupported_low_version() {
let protocols: UnvalidatedProtoEntry = "HSIntro=2-3".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_some());
assert_eq!("HSIntro=2", &unsupported.unwrap().to_string());
}
#[test]
fn protocol_all_supported_with_unsupported_high_version() {
let protocols: UnvalidatedProtoEntry = "Cons=1-2,999".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_some());
assert_eq!("Cons=999", &unsupported.unwrap().to_string());
}
#[test]
fn protocol_all_supported_with_mix_of_supported_and_unsupported() {
let protocols: UnvalidatedProtoEntry = "Link=3-4 Wombat=9".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_some());
assert_eq!("Wombat=9", &unsupported.unwrap().to_string());
}
#[test]
fn protover_string_supports_protocol_returns_true_for_single_supported() {
let protocols: UnvalidatedProtoEntry = "Link=3-4 Cons=1".parse().unwrap();
let is_supported = protocols.supports_protocol(&protover::Protocol::Cons.into(), &1);
assert_eq!(true, is_supported);
}
#[test]
fn protover_string_supports_protocol_returns_false_for_single_unsupported() {
let protocols: UnvalidatedProtoEntry = "Link=3-4 Cons=1".parse().unwrap();
let is_supported = protocols.supports_protocol(&protover::Protocol::Cons.into(), &2);
assert_eq!(false, is_supported);
}
#[test]
fn protover_string_supports_protocol_returns_false_for_unsupported() {
let protocols: UnvalidatedProtoEntry = "Link=3-4".parse().unwrap();
let is_supported = protocols.supports_protocol(&protover::Protocol::Cons.into(), &2);
assert_eq!(false, is_supported);
}
#[test]
#[should_panic]
fn parse_protocol_with_unexpected_characters() {
let _: UnvalidatedProtoEntry = "Cons=*-%".parse().unwrap();
}
#[test]
#[should_panic]
fn protover_compute_vote_returns_empty_for_empty_string() {
let protocols: &[UnvalidatedProtoEntry] = &["".parse().unwrap()];
let listed = ProtoverVote::compute(protocols, &1);
assert_eq!("", listed.to_string());
}
#[test]
fn protover_compute_vote_returns_single_protocol_for_matching() {
let protocols: &[UnvalidatedProtoEntry] = &["Cons=1".parse().unwrap()];
let listed = ProtoverVote::compute(protocols, &1);
assert_eq!("Cons=1", listed.to_string());
}
#[test]
fn protover_compute_vote_returns_two_protocols_for_two_matching() {
let protocols: &[UnvalidatedProtoEntry] = &["Link=1 Cons=1".parse().unwrap()];
let listed = ProtoverVote::compute(protocols, &1);
assert_eq!("Cons=1 Link=1", listed.to_string());
}
#[test]
fn protover_compute_vote_returns_one_protocol_when_one_out_of_two_matches() {
let protocols: &[UnvalidatedProtoEntry] =
&["Cons=1 Link=2".parse().unwrap(), "Cons=1".parse().unwrap()];
let listed = ProtoverVote::compute(protocols, &2);
assert_eq!("Cons=1", listed.to_string());
}
#[test]
fn protover_compute_vote_returns_protocols_that_it_doesnt_currently_support() {
let protocols: &[UnvalidatedProtoEntry] =
&["Foo=1 Cons=2".parse().unwrap(), "Bar=1".parse().unwrap()];
let listed = ProtoverVote::compute(protocols, &1);
assert_eq!("Bar=1 Cons=2 Foo=1", listed.to_string());
}
#[test]
fn protover_compute_vote_returns_matching_for_mix() {
let protocols: &[UnvalidatedProtoEntry] = &["Link=1-10,500 Cons=1,3-7,8".parse().unwrap()];
let listed = ProtoverVote::compute(protocols, &1);
assert_eq!("Cons=1,3-8 Link=1-10,500", listed.to_string());
}
#[test]
fn protover_compute_vote_returns_matching_for_longer_mix() {
let protocols: &[UnvalidatedProtoEntry] = &[
"Desc=1-10,500 Cons=1,3-7,8".parse().unwrap(),
"Link=123-456,78 Cons=2-6,8 Desc=9".parse().unwrap(),
];
let listed = ProtoverVote::compute(protocols, &1);
assert_eq!("Cons=1-8 Desc=1-10,500 Link=78,123-456", listed.to_string());
}
#[test]
fn protover_compute_vote_returns_matching_for_longer_mix_with_threshold_two() {
let protocols: &[UnvalidatedProtoEntry] = &[
"Desc=1-10,500 Cons=1,3-7,8".parse().unwrap(),
"Link=123-456,78 Cons=2-6,8 Desc=9".parse().unwrap(),
];
let listed = ProtoverVote::compute(protocols, &2);
assert_eq!("Cons=3-6,8 Desc=9", listed.to_string());
}
#[test]
fn protover_compute_vote_handles_duplicated_versions() {
let protocols: &[UnvalidatedProtoEntry] =
&["Cons=1".parse().unwrap(), "Cons=1".parse().unwrap()];
assert_eq!("Cons=1", ProtoverVote::compute(protocols, &2).to_string());
let protocols: &[UnvalidatedProtoEntry] =
&["Cons=1-2".parse().unwrap(), "Cons=1-2".parse().unwrap()];
assert_eq!("Cons=1-2", ProtoverVote::compute(protocols, &2).to_string());
}
#[test]
fn protover_compute_vote_handles_invalid_proto_entries() {
let protocols: &[UnvalidatedProtoEntry] = &[<|fim▁hole|> ];
assert_eq!("Cons=1", ProtoverVote::compute(protocols, &2).to_string());
}
#[test]
fn parse_protocol_with_single_protocol_and_two_nonsequential_versions() {
let _: ProtoEntry = "Desc=1,2".parse().unwrap();
}
#[test]
fn protover_is_supported_here_returns_true_for_supported_protocol() {
assert_eq!(
true,
protover::is_supported_here(&protover::Protocol::Cons, &1)
);
}
#[test]
fn protover_is_supported_here_returns_false_for_unsupported_protocol() {
assert_eq!(
false,
protover::is_supported_here(&protover::Protocol::Cons, &5)
);
}
#[test]
fn protocol_all_supported_with_single_proto_and_single_version() {
let protocol: UnvalidatedProtoEntry = "Cons=1".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocol.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
fn protocol_all_supported_with_single_protocol_and_multiple_versions() {
let protocol: UnvalidatedProtoEntry = "Cons=1-2".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocol.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
fn protocol_all_supported_with_different_single_protocol_and_single_version() {
let protocol: UnvalidatedProtoEntry = "HSDir=1".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocol.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
fn protocol_all_supported_with_single_protocol_and_supported_version() {
let protocol: UnvalidatedProtoEntry = "Desc=2".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocol.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
fn protocol_all_supported_with_two_protocols_and_single_version() {
let protocols: UnvalidatedProtoEntry = "Cons=1 HSDir=1".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocols.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
fn protocol_all_supported_with_single_protocol_and_two_nonsequential_versions() {
let protocol: UnvalidatedProtoEntry = "Desc=1,2".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocol.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
fn protocol_all_supported_with_single_protocol_and_two_sequential_versions() {
let protocol: UnvalidatedProtoEntry = "Desc=1-2".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocol.all_supported();
assert_eq!(true, unsupported.is_none());
}
#[test]
fn protocol_all_supported_with_single_protocol_and_protocol_range() {
let protocol: UnvalidatedProtoEntry = "Link=1-4".parse().unwrap();
let unsupported: Option<UnvalidatedProtoEntry> = protocol.all_supported();
assert_eq!(true, unsupported.is_none());
}
// By allowing us to add to votes, the C implementation allows us to
// exceed the limit.
#[test]
fn protover_compute_vote_may_exceed_limit() {
let proto1: UnvalidatedProtoEntry = "Sleen=1-65535".parse().unwrap();
let proto2: UnvalidatedProtoEntry = "Sleen=100000".parse().unwrap();
let _result: UnvalidatedProtoEntry = ProtoverVote::compute(&[proto1, proto2], &1);
}
#[test]
fn protover_all_supported_should_exclude_versions_we_actually_do_support() {
let proto: UnvalidatedProtoEntry = "Link=3-999".parse().unwrap();
let result: String = proto.all_supported().unwrap().to_string();
assert_eq!(result, "Link=6-999".to_string());
}
#[test]
fn protover_all_supported_should_exclude_versions_we_actually_do_support_complex1() {
let proto: UnvalidatedProtoEntry = "Link=1-3,345-666".parse().unwrap();
let result: String = proto.all_supported().unwrap().to_string();
assert_eq!(result, "Link=345-666".to_string());
}
#[test]
fn protover_all_supported_should_exclude_versions_we_actually_do_support_complex2() {
let proto: UnvalidatedProtoEntry = "Link=1-3,5-12".parse().unwrap();
let result: String = proto.all_supported().unwrap().to_string();
assert_eq!(result, "Link=6-12".to_string());
}
#[test]
fn protover_all_supported_should_exclude_some_versions_and_entire_protocols() {
let proto: UnvalidatedProtoEntry = "Link=1-3,5-12 Quokka=9000-9001".parse().unwrap();
let result: String = proto.all_supported().unwrap().to_string();
assert_eq!(result, "Link=6-12 Quokka=9000-9001".to_string());
}
#[test]
fn protover_all_supported_should_not_dos_anyones_computer() {
let proto: UnvalidatedProtoEntry = "Link=1-2147483648".parse().unwrap();
let result: String = proto.all_supported().unwrap().to_string();
assert_eq!(result, "Link=6-2147483648".to_string());
}
#[test]
fn protover_all_supported_should_not_dos_anyones_computer_max_versions() {
let proto: UnvalidatedProtoEntry = "Link=1-4294967294".parse().unwrap();
let result: String = proto.all_supported().unwrap().to_string();
assert_eq!(result, "Link=6-4294967294".to_string());
}
#[test]
// C_RUST_DIFFERS: The C will return true (e.g. saying "yes, that's supported")
// but set the msg to NULL (??? seems maybe potentially bad). The Rust will
// simply return a None.
fn protover_all_supported_should_return_empty_string_for_weird_thing() {
let proto: UnvalidatedProtoEntry = "Fribble=".parse().unwrap();
let result: Option<UnvalidatedProtoEntry> = proto.all_supported();
assert!(result.is_none());
}
#[test]
fn protover_unvalidatedprotoentry_should_err_entirely_unparseable_things() {
let proto: Result<UnvalidatedProtoEntry, ProtoverError> = "Fribble".parse();
assert_eq!(Err(ProtoverError::Unparseable), proto);
}
#[test]
fn protover_all_supported_over_maximum_limit() {
let proto: Result<UnvalidatedProtoEntry, ProtoverError> = "Sleen=1-4294967295".parse();
assert_eq!(Err(ProtoverError::ExceedsMax), proto);
}<|fim▁end|> | "Cons=1".parse().unwrap(),
"Cons=1".parse().unwrap(),
"Dinosaur=1".parse().unwrap(), |
<|file_name|>overwrite.hpp<|end_file_name|><|fim▁begin|>// Boost.Range library
//
// Copyright Neil Groves 2009. Use, modification and
// distribution is subject to the Boost Software License, Version
// 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// For more information, see http://www.boost.org/libs/range/
//
#ifndef BOOST_RANGE_ALGORITHM_EXT_OVERWRITE_HPP_INCLUDED
#define BOOST_RANGE_ALGORITHM_EXT_OVERWRITE_HPP_INCLUDED
#include <boost/range/config.hpp>
#include <boost/range/concepts.hpp>
#include <boost/range/difference_type.hpp>
#include <boost/range/iterator.hpp>
#include <boost/range/begin.hpp>
#include <boost/range/end.hpp>
#include <boost/assert.hpp>
namespace mars_boost {} namespace boost = mars_boost; namespace mars_boost
{
namespace range
{
template< class SinglePassRange1, class SinglePassRange2 >
inline void overwrite( const SinglePassRange1& from, SinglePassRange2& to )
{
BOOST_RANGE_CONCEPT_ASSERT(( SinglePassRangeConcept<const SinglePassRange1> ));
BOOST_RANGE_CONCEPT_ASSERT(( SinglePassRangeConcept<SinglePassRange2> ));
BOOST_DEDUCED_TYPENAME range_iterator<const SinglePassRange1>::type
i = mars_boost::begin(from), e = mars_boost::end(from);
BOOST_DEDUCED_TYPENAME range_iterator<SinglePassRange2>::type
out = mars_boost::begin(to);
#ifndef NDEBUG
BOOST_DEDUCED_TYPENAME range_iterator<SinglePassRange2>::type
last_out = mars_boost::end(to);
#endif
for( ; i != e; ++out, ++i )
{
#ifndef NDEBUG
BOOST_ASSERT( out != last_out
&& "out of bounds in mars_boost::overwrite()" );
#endif
*out = *i;
}
}
template< class SinglePassRange1, class SinglePassRange2 >
inline void overwrite( const SinglePassRange1& from, const SinglePassRange2& to )
{
BOOST_RANGE_CONCEPT_ASSERT(( SinglePassRangeConcept<const SinglePassRange1> ));
BOOST_RANGE_CONCEPT_ASSERT(( SinglePassRangeConcept<const SinglePassRange2> ));<|fim▁hole|> i = mars_boost::begin(from), e = mars_boost::end(from);
BOOST_DEDUCED_TYPENAME range_iterator<const SinglePassRange2>::type
out = mars_boost::begin(to);
#ifndef NDEBUG
BOOST_DEDUCED_TYPENAME range_iterator<const SinglePassRange2>::type
last_out = mars_boost::end(to);
#endif
for( ; i != e; ++out, ++i )
{
#ifndef NDEBUG
BOOST_ASSERT( out != last_out
&& "out of bounds in mars_boost::overwrite()" );
#endif
*out = *i;
}
}
} // namespace range
using range::overwrite;
} // namespace mars_boost {} namespace boost = mars_boost; namespace mars_boost
#endif // include guard<|fim▁end|> |
BOOST_DEDUCED_TYPENAME range_iterator<const SinglePassRange1>::type |
<|file_name|>login.js<|end_file_name|><|fim▁begin|>export default {
userData: {},
isAuthenticated: false,<|fim▁hole|> attemptFailed: false,
reviewData: []
}<|fim▁end|> | registrationMode: false,
registrationFailed: false, |
<|file_name|>mountain_car.py<|end_file_name|><|fim▁begin|>"""
https://webdocs.cs.ualberta.ca/~sutton/MountainCar/MountainCar1.cp
"""
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
class MountainCarEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self):
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = 0.5
<|fim▁hole|> self.viewer = None
self.action_space = spaces.Discrete(3)
self.observation_space = spaces.Box(self.low, self.high)
self._seed()
self.reset()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))
position, velocity = self.state
velocity += (action-1)*0.001 + math.cos(3*position)*(-0.0025)
velocity = np.clip(velocity, -self.max_speed, self.max_speed)
position += velocity
position = np.clip(position, self.min_position, self.max_position)
if (position==self.min_position and velocity<0): velocity = 0
done = bool(position >= self.goal_position)
reward = -1.0
self.state = (position, velocity)
return np.array(self.state), reward, done, {}
def _reset(self):
self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])
return np.array(self.state)
def _height(self, xs):
return np.sin(3 * xs)*.45+.55
def _render(self, mode='human', close=False):
if close:
if self.viewer is not None:
self.viewer.close()
self.viewer = None
return
screen_width = 600
screen_height = 400
world_width = self.max_position - self.min_position
scale = screen_width/world_width
carwidth=40
carheight=20
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip((xs-self.min_position)*scale, ys*scale))
self.track = rendering.make_polyline(xys)
self.track.set_linewidth(4)
self.viewer.add_geom(self.track)
clearance = 10
l,r,t,b = -carwidth/2, carwidth/2, carheight, 0
car = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
car.add_attr(rendering.Transform(translation=(0, clearance)))
self.cartrans = rendering.Transform()
car.add_attr(self.cartrans)
self.viewer.add_geom(car)
frontwheel = rendering.make_circle(carheight/2.5)
frontwheel.set_color(.5, .5, .5)
frontwheel.add_attr(rendering.Transform(translation=(carwidth/4,clearance)))
frontwheel.add_attr(self.cartrans)
self.viewer.add_geom(frontwheel)
backwheel = rendering.make_circle(carheight/2.5)
backwheel.add_attr(rendering.Transform(translation=(-carwidth/4,clearance)))
backwheel.add_attr(self.cartrans)
backwheel.set_color(.5, .5, .5)
self.viewer.add_geom(backwheel)
flagx = (self.goal_position-self.min_position)*scale
flagy1 = self._height(self.goal_position)*scale
flagy2 = flagy1 + 50
flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))
self.viewer.add_geom(flagpole)
flag = rendering.FilledPolygon([(flagx, flagy2), (flagx, flagy2-10), (flagx+25, flagy2-5)])
flag.set_color(.8,.8,0)
self.viewer.add_geom(flag)
pos = self.state[0]
self.cartrans.set_translation((pos-self.min_position)*scale, self._height(pos)*scale)
self.cartrans.set_rotation(math.cos(3 * pos))
return self.viewer.render(return_rgb_array = mode=='rgb_array')<|fim▁end|> | self.low = np.array([self.min_position, -self.max_speed])
self.high = np.array([self.max_position, self.max_speed])
|
<|file_name|>ci.conf.js<|end_file_name|><|fim▁begin|>'use strict';
var chai = require('chai');
var promised = require('chai-as-promised');
chai.use(promised);
global.expect = chai.expect;
<|fim▁hole|> allScriptsTimeout: 15000,
// Capabilities to be passed to the webdriver instance.
capabilities: {
'browserName': 'chrome',
'loggingPrefs': {
'browser': 'ALL'
}
},
// ----- What tests to run -----
//
// Spec patterns are relative to the location of the spec file. They may
// include glob patterns.
specs: [
'admin/specs/admin-user-spec.js',
'admin/specs/users-spec.js',
'admin/specs/groups-spec.js',
'admin/specs/system-spec.js',
'admin/specs/authorizations-spec.js',
'cockpit/specs/dashboard-spec.js',
'cockpit/specs/process-definition-spec.js',
'cockpit/specs/decision-definition-spec.js',
'cockpit/specs/process-instance-spec.js',
'cockpit/specs/process-definition-filter-spec.js',
'cockpit/specs/variable-spec.js',
'cockpit/specs/suspension-spec.js',
'tasklist/specs/filter-basic-spec.js',
'tasklist/specs/filter-permissions-spec.js',
'tasklist/specs/filter-criteria-spec.js',
'tasklist/specs/filter-vg-spec.js',
'tasklist/specs/process-stariables-spec.js',
'tasklist/specs/task-claiminart-spec.js',
'tasklist/specs/tasklist-sorting-spec.js',
'tasklist/specs/tasklist-search-spec.js',
'tasklist/specs/task-detail-view-spec.js',
'tasklist/specs/task-dates-spec.js'
],
// A base URL for your application under test. Calls to protractor.get()
// with relative paths will be prepended with this.
baseUrl: 'http://localhost:8080',
// ----- The test framework -----
//
// Jasmine is fully supported as a test and assertion framework.
// Mocha has limited beta support. You will need to include your own
// assertion framework if working with mocha.
framework: 'mocha',
// ----- Options to be passed to minijasminenode -----
//
// Options to be passed to Mocha-node.
// See the full list at https://github.com/juliemr/minijasminenode
mochaOpts: {
timeout: 15000,
colors: false,
reporter: 'xunit-file',
slow: 3000
}
};<|fim▁end|> | exports.config = {
// The timeout for each script run on the browser. This should be longer
// than the maximum time your application needs to stabilize between tasks. |
<|file_name|>basecamp.py<|end_file_name|><|fim▁begin|>from core import send_request
from people import get_me
from projects import get_all_active_projects, get_project
from todo_lists import get_todo_list, get_todo, get_all_active_todo_lists
from stars import get_starred_projects
try:
from MY_BC import BC
except ImportError:
from core import MY_BC_NUMBER as BC
class Camper(object):
def __init__(self,**kwargs):
if kwargs.get('name',False):
self.name = kwargs['name']
if kwargs.get('id',False):
self.id = kwargs['id']
if kwargs.get('email_address',False):
self.email_address = kwargs['email_address']
if kwargs.get('admin',False):
self.admin = kwargs['admin']
if kwargs.get('created_at',False):
self.created_at = kwargs['created_at']
if kwargs.get('updated_at',False):
self.updated_at = kwargs['updated_at']
if kwargs.get('starred_projects',False):
self._starred_projects = kwargs['starred_projects']
if kwargs.get('active_projects',False):
self._active_projects = kwargs['active_projects']
if kwargs.get('events',False):
self._events = kwargs['events']
if kwargs.get('assigned_todos',False):
self._assigned_todos = kwargs['assigned_todos']
if kwargs.get('avatar_url',False):
self.avatar_url = kwargs['avatar_url']
if kwargs.get('fullsize_avatar_url',False):
self.fullsize_avatar_url = kwargs['fullsize_avatar_url']
self.todos = send_request(url=self._assigned_todos['url'])
self.assigned_todos = []
for bucket in self.todos:
self.assigned_todos.append(bucket['assigned_todos'])
#self.starred_projects = send_request(url=self._starred_projects['url'])
self.events = send_request(url=self._events['url'])
#self.active_projects = send_request(url=self._active_projects['url'])
def get_avatar(self,filename):
fp = open(filename,'wb')
data = send_request(url=self.avatar_url,json=False)
fp.write(data.content)
fp.close()
class BaseCampPerson(object):
BC_ACCOUNT_NUM = BC
class BaseCamper(BaseCampPerson):
def __init__(self,bc_account_number=None,**kwargs):
if bc_account_number is None and kwargs.get('account',None) is None:
self.bc_number = self.BC_ACCOUNT_NUM
else:
if bc_account_number is not None:
self.bc_number = bc_account_number
else:
self.bc_number = kwargs.get('account',None)
self._internal_camper = Camper(**get_me(self.bc_number))
self._todos = []
for attr in dir(self._internal_camper):
if not attr.startswith('_'):
setattr(self,attr,getattr(self._internal_camper,attr))
self._get_todos()
self._get_projects()
def __getitem__(self,key):
if key in dir(self._internal_camper):
return self._internal_camper.__dict__[key]
def _get_todos(self):
self._todo_buckets = []
for bucket in self.assigned_todos:
tmp = []
for todo in bucket:
res = send_request(url=todo['url'])
tmp.append(res)
self._todos.append(res)
self._todo_buckets.append(tmp)
def get_project(self,pid):
return get_project(self.BC_ACCOUNT_NUM,pid)<|fim▁hole|>
def _get_projects(self):
self.pm = BCProjectManager(self)
@staticmethod
def send_basecamp_request(url):
return send_request(url=url)
@property
def todo_buckets(self):
return self._todo_buckets
@property
def current_todos(self):
return self._todos
@property
def todo_count(self):
return len(self._todos)
@property
def event_count(self):
return len(self.events)
@property
def project_count(self):
return len(self.projects)
@property
def projects(self):
return self.pm.projects
class BCProjectManager(object):
def __init__(self,camper):
self.bc = camper
self.projects = get_all_active_projects(self.bc.BC_ACCOUNT_NUM)
def get_project(self,pid):
return get_project(self.bc.BC_ACCOUNT_NUM,pid)
def get_projects(self):
return self.projects
def get_project_todolists(self,pid):
for proj in self.projects:
if proj['id'] == pid:
return send_request(url=proj['todolists']['url'])
return None<|fim▁end|> | |
<|file_name|>event.go<|end_file_name|><|fim▁begin|>/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
<|fim▁hole|>See the License for the specific language governing permissions and
limitations under the License.
*/
package events
const (
// Container event reason list
CreatedContainer = "Created"
StartedContainer = "Started"
FailedToCreateContainer = "Failed"
FailedToStartContainer = "Failed"
KillingContainer = "Killing"
PreemptContainer = "Preempting"
BackOffStartContainer = "BackOff"
ExceededGracePeriod = "ExceededGracePeriod"
// Image event reason list
PullingImage = "Pulling"
PulledImage = "Pulled"
FailedToPullImage = "Failed"
FailedToInspectImage = "InspectFailed"
ErrImageNeverPullPolicy = "ErrImageNeverPull"
BackOffPullImage = "BackOff"
// kubelet event reason list
NodeReady = "NodeReady"
NodeNotReady = "NodeNotReady"
NodeSchedulable = "NodeSchedulable"
NodeNotSchedulable = "NodeNotSchedulable"
StartingKubelet = "Starting"
KubeletSetupFailed = "KubeletSetupFailed"
FailedAttachVolume = "FailedAttachVolume"
FailedDetachVolume = "FailedDetachVolume"
FailedMountVolume = "FailedMount"
FailedUnMountVolume = "FailedUnMount"
WarnAlreadyMountedVolume = "AlreadyMountedVolume"
SuccessfulDetachVolume = "SuccessfulDetachVolume"
SuccessfulMountVolume = "SuccessfulMountVolume"
SuccessfulUnMountVolume = "SuccessfulUnMountVolume"
HostPortConflict = "HostPortConflict"
NodeSelectorMismatching = "NodeSelectorMismatching"
InsufficientFreeCPU = "InsufficientFreeCPU"
InsufficientFreeMemory = "InsufficientFreeMemory"
HostNetworkNotSupported = "HostNetworkNotSupported"
UndefinedShaper = "NilShaper"
NodeRebooted = "Rebooted"
ContainerGCFailed = "ContainerGCFailed"
ImageGCFailed = "ImageGCFailed"
FailedNodeAllocatableEnforcement = "FailedNodeAllocatableEnforcement"
SuccessfulNodeAllocatableEnforcement = "NodeAllocatableEnforced"
UnsupportedMountOption = "UnsupportedMountOption"
SandboxChanged = "SandboxChanged"
FailedCreatePodSandBox = "FailedCreatePodSandBox"
// Image manager event reason list
InvalidDiskCapacity = "InvalidDiskCapacity"
FreeDiskSpaceFailed = "FreeDiskSpaceFailed"
// Probe event reason list
ContainerUnhealthy = "Unhealthy"
// Pod worker event reason list
FailedSync = "FailedSync"
// Config event reason list
FailedValidation = "FailedValidation"
// Lifecycle hooks
FailedPostStartHook = "FailedPostStartHook"
FailedPreStopHook = "FailedPreStopHook"
UnfinishedPreStopHook = "UnfinishedPreStopHook"
)<|fim▁end|> | Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
<|file_name|>update.go<|end_file_name|><|fim▁begin|>package system
import (
// HOFSTADTER_START import
"fmt"
// HOFSTADTER_END import
// custom imports
// infered imports
"github.com/spf13/cobra"
)
// Tool: geb
// Name: update
// Usage: update
// Parent: system
<|fim▁hole|>// HOFSTADTER_START var
// HOFSTADTER_END var
// HOFSTADTER_START init
// HOFSTADTER_END init
var UpdateLong = `Update the geb library DSLs, designs, and other files in the dot folder.`
var UpdateCmd = &cobra.Command{
Use: "update",
Short: "Update the geb library and dot folder",
Long: UpdateLong,
Run: func(cmd *cobra.Command, args []string) {
logger.Debug("In updateCmd", "args", args)
// Argument Parsing
// HOFSTADTER_START cmd_run
fmt.Println("geb system update will be updated when the website goes live.")
// HOFSTADTER_END cmd_run
},
}
func init() {
// add sub-commands to this command when present
}
// HOFSTADTER_BELOW<|fim▁end|> | // HOFSTADTER_START const
// HOFSTADTER_END const
|
<|file_name|>hoisting_sf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# calculates safety factor in hoisting lines with various numbers of parts & rotating sheaves
# (c) 2013, D. Djokic
# No guarantees whatsoever - use this code at your own risk
# Released under GNU General Public License
'''
IADC safety factor recommendations:
drilling and other routine operations = 3
mast rising = 2.5
setting casing = 2
jarring = 2
'''
def get_float(message, default):
#get float number - error check included
try:
f=input (message)
st=type(f)
if f==0:
f=default
return float(f) ##dodo
elif f==" ":
f=default
return float(f) ##dodo
else:
return float(f)
except:
print("Wrong Input! Try again")
return(get_float(message, default))
def get_integer(message, default):
#get integer number - error check included
try:
f=input (message)
st=type(f)
if f==0:
f=default
return int(f)
elif f==" ":
f=default
return int(f)
else:
return int(f)
except:
print("Wrong Input! Try again")
return(get_integer(message, default))
def write_file (file, description, var):
#write to file
file.write ("\n")<|fim▁hole|> file.write (str(var))
W = get_float ("Hoisted Weight in tones (1 ton = 2000lb) = ", 40)
L = get_float ("Capacity of existing wire rope in tones (1 ton = 2000lb): ", 90)
n = get_integer ("Number of lines or '0' for default of 4 = ", 4)
s = get_integer ("Number of rotating sheaves or '0' for equal to number of lines = ", n)
print ("Sheave bearing factor: 1.045 for Bronze Bushing; 1.02 for Roller Bearing" )
K = get_float ("Sheave roller bearing friction factor - enter '0' for default of 1.045 = ", 1.045)
print ("Wire line efficiency due to bending")
print ("1 - D/d ratio = 25:1 - API 9A")
print ("2 - D/d ratio = 40:1 - API 9A")
print ("3 - Input your data for wire line efficiency")
dratio = get_integer ("Choose 1, 2 or 3: ", 1)
if dratio == 1:
wire_eff = 0.95
elif dratio == 2:
wire_eff = 0.97
else:
wire_eff = get_float ("Input wire line efficiency due to bending <1: ", 0.95)
#sfact=L*wire_eff*(K**n-1)/((W*K**s)*(K-1))
mechEfficiency = (K**n-1)/((K**s)*(K-1))
mechAdv = n*mechEfficiency
linePull = W/mechAdv
linePull_bend = linePull/wire_eff
sfact = L/linePull_bend
fname = 'hoisting_sf.txt'
fn = open (fname, 'a')
fn.write ('hoisting_sf.py Output:')
print ("\n\nSafety Factor for Operation with this wire = %f" %(sfact))
print ("\nIADC Recommended Safety Factors:\n\nDrilling and other routine operations = 3\nMast Rising = 2.5\nSetting Casing = 2\nJarring = 2")
write_file (fn, "Hoisted weight in tones (1 ton = 2000 lb): ", W)
write_file (fn, "Capacity of existing wire rope in tones (1 ton = 2000lb): ", L)
write_file (fn, "Number of lines: ", n)
write_file (fn, "Number of rotating sheaves: " ,s)
write_file (fn, "Sheave roller bearing friction factor: ", K)
write_file (fn, "Wire Line Efficiency due to bending: ", wire_eff)
write_file (fn, "Safety Factor: ", sfact)
fn.write("\n\nIADC Recommended Safety Factors:")
fn.write ("\nDrilling and other routine operations = 3")
fn.write ("\nMast Rising = 2.5")
fn.write ("\nSetting Casing = 2")
fn.write ("\nJarring = 2")
fn.write ("\nValidate results! No warranties are associated with this code!")
fn.close()
print ("Check file 'hoisting_sf.txt' in working folder!")<|fim▁end|> | file.write (str(description))
file.write ("\t") |
<|file_name|>explicit-call-to-dtor.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at<|fim▁hole|>// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct Foo {
x: int
}
impl Drop for Foo {
fn drop(&mut self) {
println!("kaboom");
}
}
fn main() {
let x = Foo { x: 3 };
x.drop(); //~ ERROR explicit use of destructor method
}<|fim▁end|> | // http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or |
<|file_name|>utils.py<|end_file_name|><|fim▁begin|>"""Provides some useful utilities for the Discord bot, mostly to do with cleaning."""
import re
import discord
__all__ = ['clean', 'is_clean']
mass_mention = re.compile('@(everyone|here)')
member_mention = re.compile(r'<@\!?(\d+)>')
role_mention = re.compile(r'<@&(\d+)>')
channel_mention = re.compile(r'<#(\d+)>')
def clean(ctx, text=None, *, mass=True, member=True, role=True, channel=True):
"""Cleans the message of anything specified in the parameters passed."""
if text is None:
text = ctx.message.content
    cleaned_text = text
    if mass:
        cleaned_text = mass_mention.sub(lambda match: '@\N{ZERO WIDTH SPACE}' + match.group(1), cleaned_text)
if member:
cleaned_text = member_mention.sub(lambda match: clean_member_name(ctx, int(match.group(1))), cleaned_text)
if role:
cleaned_text = role_mention.sub(lambda match: clean_role_name(ctx, int(match.group(1))), cleaned_text)
if channel:
cleaned_text = channel_mention.sub(lambda match: clean_channel_name(ctx, int(match.group(1))), cleaned_text)
return cleaned_text
def is_clean(ctx, text=None):
"""Checks if the message is clean already and doesn't need to be cleaned."""
if text is None:
text = ctx.message.content
return all(regex.search(text) is None for regex in (mass_mention, member_mention, role_mention, channel_mention))
def clean_member_name(ctx, member_id):
"""Cleans a member's name from the message."""
member = ctx.guild.get_member(member_id)
if member is None:
return '<@\N{ZERO WIDTH SPACE}%d>' % member_id
elif is_clean(ctx, member.display_name):
return member.display_name
elif is_clean(ctx, str(member)):
return str(member)
else:
return '<@\N{ZERO WIDTH SPACE}%d>' % member.id
def clean_role_name(ctx, role_id):
"""Cleans role pings from messages."""
role = discord.utils.get(ctx.guild.roles, id=role_id) # Guild.get_role doesn't exist
if role is None:
return '<@&\N{ZERO WIDTH SPACE}%d>' % role_id
elif is_clean(ctx, role.name):
return '@' + role.name
else:
return '<@&\N{ZERO WIDTH SPACE}%d>' % role.id
def clean_channel_name(ctx, channel_id):
"""Cleans channel mentions from messages."""
channel = ctx.guild.get_channel(channel_id)
if channel is None:
return '<#\N{ZERO WIDTH SPACE}%d>' % channel_id
elif is_clean(ctx, channel.name):
return '#' + channel.name
else:
return '<#\N{ZERO WIDTH SPACE}%d>' % channel.id
def pretty_concat(strings, single_suffix='', multi_suffix=''):
"""Concatenates things in a pretty way"""
if len(strings) == 1:<|fim▁hole|> return '{} and {}{}'.format(*strings, multi_suffix)
else:
return '{}, and {}{}'.format(', '.join(strings[:-1]), strings[-1], multi_suffix)<|fim▁end|> | return strings[0] + single_suffix
elif len(strings) == 2: |
<|file_name|>document_dialog.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# This file is part of Gertrude.
#
# Gertrude is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Gertrude is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gertrude; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from builtins import str as text
import traceback
import subprocess
import wx
import wx.lib.filebrowsebutton
from ooffice import *
class DocumentDialog(wx.Dialog):
def __init__(self, parent, modifications):
self.modifications = modifications
self.document_generated = False
# Instead of calling wx.Dialog.__init__ we precreate the dialog
# so we can set an extra style that must be set before
# creation, and then we create the GUI object using the Create
# method.
pre = wx.PreDialog()
pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
pre.Create(parent, -1, "Génération de document")
# This next step is the most important, it turns this Python
# object into the real wrapper of the dialog (instead of pre)
# as far as the wxPython extension is concerned.
self.PostCreate(pre)
self.sizer = wx.BoxSizer(wx.VERTICAL)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(wx.StaticText(self, -1, "Format :"), 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
if not IsOODocument(modifications.template):
self.format = wx.Choice(self, -1, choices=["Texte"])
elif sys.platform == 'win32':
self.format = wx.Choice(self, -1, choices=["LibreOffice", "PDF"])
else:
self.format = wx.Choice(self, -1, choices=["LibreOffice"])
self.format.SetSelection(0)
self.Bind(wx.EVT_CHOICE, self.onFormat, self.format)
        sizer.Add(self.format, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
default_output = normalize_filename(modifications.default_output)
self.extension = os.path.splitext(default_output)[-1]
wildcard = "OpenDocument (*%s)|*%s|PDF files (*.pdf)|*.pdf" % (self.extension, self.extension)
self.fbb = wx.lib.filebrowsebutton.FileBrowseButton(self, -1,
size=(600, -1),
labelText="Nom de fichier :",
startDirectory=config.documents_directory,
initialValue=os.path.join(config.documents_directory, default_output),
fileMask=wildcard,
fileMode=wx.SAVE)
sizer.Add(self.fbb, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.sizer.Add(sizer, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.gauge = wx.Gauge(self, -1, size=(-1, 10))
self.gauge.SetRange(100)
self.sizer.Add(self.gauge, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | wx.LEFT | wx.TOP, 5)
line = wx.StaticLine(self, -1, size=(20, -1), style=wx.LI_HORIZONTAL)
self.sizer.Add(line, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.TOP | wx.BOTTOM, 5)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sauver_ouvrir = wx.Button(self, -1, "Sauver et ouvrir")
self.sauver_ouvrir.SetDefault()
self.Bind(wx.EVT_BUTTON, self.OnSauverOuvrir, self.sauver_ouvrir)
sizer.Add(self.sauver_ouvrir, 0, wx.LEFT | wx.RIGHT, 5)
self.sauver = wx.Button(self, -1, "Sauver")
self.Bind(wx.EVT_BUTTON, self.OnSauver, self.sauver)
sizer.Add(self.sauver, 0, wx.RIGHT, 5)
if modifications.multi:
button = wx.Button(self, -1, "Sauver individuellement")
self.Bind(wx.EVT_BUTTON, self.OnSauverUnitaire, button)
sizer.Add(button, 0, wx.RIGHT, 5)
if modifications.email:
self.sauver_envoyer = wx.Button(self, -1, "Sauver et envoyer par email")
self.Bind(wx.EVT_BUTTON, self.OnSauverEnvoyer, self.sauver_envoyer)
sizer.Add(self.sauver_envoyer, 0, wx.RIGHT, 5)
if modifications.multi is False and not modifications.email_to:
self.sauver_envoyer.Disable()
if database.creche.caf_email:
self.sauver_envoyer = wx.Button(self, -1, "Sauver et envoyer par email à la CAF")
self.Bind(wx.EVT_BUTTON, self.OnSauverEnvoyerCAF, self.sauver_envoyer)
sizer.Add(self.sauver_envoyer, 0, wx.LEFT | wx.RIGHT, 5)
# btnsizer.Add(self.ok)
btn = wx.Button(self, wx.ID_CANCEL)
sizer.Add(btn, 0, wx.RIGHT, 5)
self.sizer.Add(sizer, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.SetSizer(self.sizer)
self.sizer.Fit(self)
self.CenterOnScreen()
def onFormat(self, _):
filename = os.path.splitext(self.fbb.GetValue())[0]
if self.format.GetSelection() == 0:
self.fbb.SetValue(filename + self.extension, None)
else:
self.fbb.SetValue(filename + ".pdf", None)
def Sauver(self):
self.fbb.Disable()
self.sauver.Disable()
if self.sauver_ouvrir:
self.sauver_ouvrir.Disable()<|fim▁hole|> self.filename = self.fbb.GetValue()
f, e = os.path.splitext(self.filename)
if e == ".pdf":
self.pdf = True
self.oo_filename = f + self.extension
else:
self.pdf = False
self.oo_filename = self.filename
config.documents_directory = os.path.dirname(self.filename)
dlg = None
try:
if self.modifications.multi is not False:
errors = {}
simple_modifications = self.modifications.get_simple_modifications(self.oo_filename)
for i, (filename, modifs) in enumerate(simple_modifications):
self.gauge.SetValue((100 * i) / len(simple_modifications))
errors.update(GenerateDocument(modifs, filename=filename))
if self.pdf:
f, e = os.path.splitext(filename)
convert_to_pdf(filename, f + ".pdf")
os.remove(filename)
else:
self.filename = self.filename.replace(" <prenom> <nom>", "")
self.oo_filename = self.oo_filename.replace(" <prenom> <nom>", "")
errors = GenerateDocument(self.modifications, filename=self.oo_filename, gauge=self.gauge)
if self.pdf:
convert_to_pdf(self.oo_filename, self.filename)
os.remove(self.oo_filename)
self.document_generated = True
if errors:
message = "Document %s généré avec des erreurs :\n" % self.filename
for label in errors.keys():
message += '\n' + label + ' :\n '
message += '\n '.join(errors[label])
dlg = wx.MessageDialog(self, message, 'Message', wx.OK | wx.ICON_WARNING)
except IOError:
print(sys.exc_info())
dlg = wx.MessageDialog(self, "Impossible de sauver le document. Peut-être est-il déjà ouvert ?", 'Erreur',
wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
return
except Exception as e:
info = sys.exc_info()
message = ' [type: %s value: %s traceback: %s]' % (info[0], info[1], traceback.extract_tb(info[2]))
dlg = wx.MessageDialog(self, message, 'Erreur', wx.OK | wx.ICON_WARNING)
if dlg:
dlg.ShowModal()
dlg.Destroy()
self.EndModal(wx.ID_OK)
def OnSauver(self, _):
self.modifications.multi = False
self.Sauver()
def OnSauverOuvrir(self, event):
self.OnSauver(event)
if self.document_generated:
if self.filename.endswith(".pdf"):
StartAcrobatReader(self.filename)
else:
StartLibreOffice(self.filename)
def OnSauverUnitaire(self, _):
self.Sauver()
def OnSauverEnvoyer(self, event):
self.OnSauverUnitaire(event)
if self.document_generated:
if self.modifications.multi is not False:
simple_modifications = self.modifications.get_simple_modifications(self.oo_filename)
emails = '\n'.join(
[" - %s (%s)" % (modifs.email_subject, ", ".join(modifs.email_to)) for filename, modifs in
simple_modifications])
if len(emails) > 1000:
emails = emails[:1000] + "\n..."
dlg = wx.MessageDialog(self, "Ces emails seront envoyés :\n" + emails, 'Confirmation',
wx.OK | wx.CANCEL | wx.ICON_WARNING)
response = dlg.ShowModal()
dlg.Destroy()
if response != wx.ID_OK:
return
for filename, modifs in simple_modifications:
if self.pdf:
oo_filename = filename
filename, e = os.path.splitext(oo_filename)
filename += ".pdf"
try:
SendDocument(filename, modifs)
except Exception as e:
dlg = wx.MessageDialog(self, "Impossible d'envoyer le document %s\n%r" % (filename, e),
'Erreur', wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
else:
try:
SendDocument(self.filename, self.modifications)
except Exception as e:
dlg = wx.MessageDialog(self, "Impossible d'envoyer le document %s\n%r" % (self.filename, e),
'Erreur', wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
def OnSauverEnvoyerCAF(self, event):
self.OnSauver(event)
if self.document_generated:
try:
root, ext = os.path.splitext(self.modifications.introduction_filename)
introduction_filename = root + " CAF" + ext
SendDocument(self.filename, self.modifications, to=[database.creche.caf_email], introduction_filename=GetTemplateFile(introduction_filename))
except Exception as e:
dlg = wx.MessageDialog(self, "Impossible d'envoyer le document %s\n%r" % (self.filename, e), "Erreur", wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
def StartLibreOffice(filename):
if sys.platform == 'win32':
filename = "".join(["file:", urllib.pathname2url(os.path.abspath(filename.encode("utf-8")))])
# print filename
try:
StarDesktop, objServiceManager, core_reflection = getOOoContext()
StarDesktop.LoadComponentFromURL(filename, "_blank", 0, MakePropertyValues(objServiceManager, [
["ReadOnly", False],
["Hidden", False]]))
except Exception as e:
print("Exception ouverture LibreOffice", e)
dlg = wx.MessageDialog(None, "Impossible d'ouvrir le document\n%r" % e, "Erreur", wx.OK|wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
else:
paths = []
if sys.platform == "darwin":
paths.append("/Applications/LibreOffice.app/Contents/MacOS/soffice")
paths.append("/Applications/OpenOffice.app/Contents/MacOS/soffice")
else:
paths.append("/usr/bin/libreoffice")
paths.append("ooffice")
for path in paths:
try:
print(path, filename)
subprocess.Popen([path, filename])
return
except Exception as e:
print(e)
pass
dlg = wx.MessageDialog(None, "Impossible de lancer OpenOffice / LibreOffice", 'Erreur', wx.OK|wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
DDE_ACROBAT_STRINGS = ["AcroviewR15", "AcroviewA15", "AcroviewR12", "AcroviewA12", "AcroviewR11", "AcroviewA11",
"AcroviewR10", "AcroviewA10", "acroview"]
dde_server = None
def StartAcrobatReader(filename):
global dde_server
import win32api
import win32ui
import dde
filename = str(os.path.abspath(filename))
path, name = os.path.split(filename)
reader = win32api.FindExecutable(name, path)
os.spawnl(os.P_NOWAIT, reader[1], " ")
for t in range(10):
time.sleep(1)
for acrobat in DDE_ACROBAT_STRINGS:
try:
if not dde_server:
dde_server = dde.CreateServer()
dde_server.Create('Gertrude')
c = dde.CreateConversation(dde_server)
c.ConnectTo(acrobat, 'control')
c.Exec('[DocOpen("%s")]' % (filename,))
return
except Exception as e:
pass
print("Impossible de lancer acrobat reader ; prochain essai dans 1s ...", e)
dlg = wx.MessageDialog(None, "Impossible d'ouvrir le document", 'Erreur', wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()<|fim▁end|> | |
<|file_name|>client.py<|end_file_name|><|fim▁begin|>import aioamqp
import asyncio
import umsgpack as msgpack
import logging
from functools import wraps
from uuid import uuid4
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
class RemoteException(Exception):
pass
class Client(object):
def __init__(self, queue='', host='localhost', port=None, ssl=False):
self._transport = None
self._protocol = None
self._channel = None
self._callback_queue = None
self._queue = queue
self._host = host
self._port = port
self._ssl = ssl
self._waiter = asyncio.Event()
async def _connect(self, *args, **kwargs):
""" an `__init__` method can't be a coroutine"""
self._transport, self._protocol = await aioamqp.connect(*args, **kwargs)
host = kwargs.get('host', 'localhost')
port = kwargs.get('port')
ssl = kwargs.get('ssl', False)
if port is None:
port = 5671 if ssl else 5672
logger.info(f'Connected to amqp://{host}:{port}/')
self._channel = await self._protocol.channel()
result = await self._channel.queue_declare(queue_name='', exclusive=True)
self._callback_queue = result['queue']
logger.info(f'Created callback queue: {self._callback_queue}')
await self._channel.basic_consume(
self._on_response,
no_ack=True,
queue_name=self._callback_queue,
)
async def _on_response(self, channel, body, envelope, properties):
if self._corr_id == properties.correlation_id:
self._response = body
logger.info(f'Received response for {self._corr_id}')
self._waiter.set()
async def __call__(self, method, *args, **kwargs):
if not self._protocol:
await self._connect(host=self._host, port=self._port, ssl=self._ssl)
self._response = None
self._corr_id = str(uuid4())
payload = msgpack.packb((method, args, kwargs))
logger.info(f'Publishing to {self._queue}: {method} ({self._corr_id})')
await self._channel.basic_publish(
payload=payload,
exchange_name='',
routing_key=self._queue,
properties={
'reply_to': self._callback_queue,
'correlation_id': self._corr_id,
},
)
logger.info(f'Waiting for response on queue {self._callback_queue} ({self._corr_id})')<|fim▁hole|> exc, result = msgpack.unpackb(self._response)
except Exception as err:
logger.error(f'Could not unpack response: {err}')
return None
if exc is not None:
raise RemoteException(exc)
return result
def __getattr__(self, method):
@wraps(self.__call__)
async def wrapper(*args, **kwargs):
return await self(method, *args, **kwargs)
return wrapper<|fim▁end|> | await self._waiter.wait()
await self._protocol.close()
try: |
<|file_name|>test_migrations_util.py<|end_file_name|><|fim▁begin|>import unittest
from db.migrations import migrations_util
class TestMigrationUtil(unittest.TestCase):
"""Test the CLI API."""
@classmethod
def setUpClass(cls):
cls.db_path = '/some/random/path/file.db'
def setUp(self):
self.parser = migrations_util.make_argument_parser(self.db_path)
def test_cli_parser_default(self):
options = self.parser.parse_args(['upgrade'])
self.assertEqual(options.path, self.db_path)
self.assertEqual(options.action, 'upgrade')
def test_cli_parser_user(self):
other_db_path = '/some/other/path/file.db'
options = self.parser.parse_args([
'downgrade',
'--path',
other_db_path
])
self.assertEqual(options.path, other_db_path)<|fim▁hole|> SystemExit,
self.parser.parse_args,
['retrograde']
)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | self.assertEqual(options.action, 'downgrade')
def test_cli_parser_bad_action(self):
self.assertRaises( |
<|file_name|>logging.rs<|end_file_name|><|fim▁begin|>use super::super::namespace::InvokeContext;
use wasm_core::value::Value;
<|fim▁hole|> LoggingNs,
"logging",
LoggingImpl,
info,
warning
);
pub struct LoggingImpl;
impl LoggingImpl {
pub fn info(&self, ctx: InvokeContext) -> Option<Value> {
let text = ctx.extract_str(0, 1);
let app = ctx.app.upgrade().unwrap();
dinfo!(logger!(&app.name), "{}", text);
None
}
pub fn warning(&self, ctx: InvokeContext) -> Option<Value> {
let text = ctx.extract_str(0, 1);
let app = ctx.app.upgrade().unwrap();
dwarning!(logger!(&app.name), "{}", text);
None
}
}<|fim▁end|> | decl_namespace!( |
<|file_name|>testoak.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 Pierre Talbot (IRCAM)
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![feature(plugin, convert, path_ext, box_syntax, rustc_private)]
#![plugin(oak)]
extern crate oak_runtime;
extern crate term;
use oak_runtime::*;
use grammars::*;
use std::path::{PathBuf, Path};
use std::fs::{File, read_dir, PathExt};
use std::io;
use std::io::Read;
use term::*;
use ExpectedResult::*;
mod grammars;
#[test]
fn test_data_directory()
{
let data_path = Path::new("data/");
if !data_path.is_dir() {
panic!(format!("`{}` is not a valid data directory.", data_path.display()));
}
let mut test_path = PathBuf::new();
test_path.push(data_path);
test_path.push(Path::new("test"));
let mut test_engine = TestEngine::new(test_path);
test_engine.register("ntcc", Box::new(|content|
ntcc::recognize_ntcc(content.stream())));
test_engine.register("type_name", Box::new(|content|
type_name::recognize_type_names(content.stream())));
test_engine.register("calc", Box::new(|content|
calc::recognize_program(content.stream())));
test_engine.run();
}
struct TestEngine
{
test_path: PathBuf,
grammars: Vec<GrammarInfo>,
display: TestDisplay
}
impl TestEngine
{
fn new(test_path: PathBuf) -> TestEngine
{
if !test_path.is_dir() {
panic!(format!("`{}` is not a valid grammar directory.", test_path.display()));
}
TestEngine{
test_path: test_path,
grammars: Vec::new(),
display: TestDisplay::new()
}
}
fn register(&mut self, name: &str, recognizer: Box<for<'a> Fn(&'a str) -> ParseState<StrStream<'a>, ()>>)
{
self.grammars.push(GrammarInfo{name: String::from(name), recognizer: recognizer});
}
fn run(&mut self)
{
self.display.title(" Oak library tests suite");
for grammar in self.grammars.iter() {
let grammar_path = self.test_path.join(Path::new(grammar.name.as_str()));
self.display.info(&format!("Start tests of the grammar `{}`", grammar.name));
self.display.path(&grammar_path);
let mut test = Test{
info: grammar,
display: &mut self.display<|fim▁hole|> &grammar_path.join(Path::new("run-pass")), Match);
test.test_directory(&format!("Run and Fail tests of `{}`", grammar.name),
&grammar_path.join(Path::new("run-fail")), Error);
}
self.display.stats();
self.display.panic_if_failure();
}
}
struct GrammarInfo
{
name: String,
recognizer: Box<for<'a> Fn(&'a str) -> ParseState<StrStream<'a>, ()>>
}
#[derive(Clone)]
enum ExpectedResult {
Match,
Error
}
struct Test<'a>
{
info: &'a GrammarInfo,
display: &'a mut TestDisplay,
}
impl<'a> Test<'a>
{
fn test_directory(&mut self, start_msg: &String, directory: &Path, expectation: ExpectedResult)
{
self.display.info(start_msg);
match read_dir(directory) {
Ok(dir_entries) => {
for entry in dir_entries.map(Result::unwrap).map(|entry| entry.path()) {
if entry.is_file() {
self.test_file(entry.as_path(), expectation.clone());
} else {
self.display.warn(&format!("Entry ignored because it's not a file."));
self.display.path(entry.as_path());
}
}
}
Err(ref io_err) => {
self.display.fs_error("Can't read directory.", directory, io_err);
}
}
}
fn test_file(&mut self, filepath: &Path, expectation: ExpectedResult)
{
let mut file = File::open(filepath);
match file {
Ok(ref mut file) => {
let mut buf_contents = vec![];
let contents = file.read_to_end(&mut buf_contents);
match contents {
Ok(_) => {
let utf8_contents = std::str::from_utf8(buf_contents.as_slice());
self.test_input(utf8_contents.unwrap(), expectation, filepath);
},
Err(ref io_err) => {
self.display.fs_error("Can't read file.", filepath, io_err);
}
}
},
Err(ref io_err) => {
self.display.fs_error("Can't open file.", filepath, io_err);
}
}
}
fn test_input(&mut self, input: &str, expectation: ExpectedResult, test_path: &Path)
{
let state = (self.info.recognizer)(input);
let result = state.into_result();
match (expectation.clone(), result) {
(Match, Ok((ref state, _))) if state.full_read() => self.display.success(test_path),
(Error, Ok((ref state, _))) if state.partial_read() => self.display.success(test_path),
(Error, Err(_)) => self.display.success(test_path),
(_, state) => {
self.display.failure(test_path, expectation, state);
}
}
}
}
struct TestDisplay
{
terminal: Box<Terminal<WriterWrapper>+'static>,
num_success: u32,
num_failure: u32,
num_system_failure: u32
}
impl TestDisplay
{
pub fn new() -> TestDisplay
{
TestDisplay{
terminal: term::stdout().unwrap(),
num_success: 0,
num_failure: 0,
num_system_failure: 0
}
}
pub fn title(&mut self, msg: &str)
{
self.write_header(term::color::CYAN, msg);
self.write_msg("\n\n");
}
pub fn info(&mut self, msg: &String)
{
self.write_line(term::color::CYAN, "\n[ info ] ", msg);
}
pub fn error(&mut self, msg: &String)
{
self.write_line(term::color::RED, " [ error ] ", msg);
}
pub fn path(&mut self, path: &Path)
{
self.write_line(term::color::CYAN, " [ path ] ",
&format!("{}", path.display()));
}
pub fn stats(&mut self)
{
let system_failure_plural = if self.num_system_failure > 1 { "s" } else { "" };
let msg = format!("{} passed, {} failed, {} system failure{}.",
self.num_success, self.num_failure, self.num_system_failure,
system_failure_plural);
self.write_line(term::color::BLUE, "\n\n[ stats ] ", &msg);
}
pub fn panic_if_failure(&self)
{
if self.num_failure > 0 || self.num_system_failure > 0 {
panic!("");
}
}
pub fn failure(&mut self, path: &Path, expectation: ExpectedResult,
result: ParseResult<StrStream, ()>)
{
self.num_failure += 1;
let test_name = self.file_stem(path);
self.write_line(term::color::RED, "[ failed ] ", &test_name);
self.path(path);
self.expected(expectation);
self.wrong_result(result);
}
fn expected(&mut self, expectation: ExpectedResult)
{
let msg = match expectation {
Match => "Fully match",
Error => "Error"
};
self.write_line(term::color::CYAN, " [ expected ] ", &format!("{}", msg))
}
fn wrong_result(&mut self, result: ParseResult<StrStream, ()>)
{
let msg = match result {
Ok((ref state, ref err)) if state.partial_read() => {
format!("Partial match. `{}`", err)
}
Ok(_) => format!("Fully matched."),
Err(err) => format!("{}", err)
};
self.error(&msg)
}
pub fn success(&mut self, path: &Path)
{
self.num_success += 1;
let test_name = self.file_stem(path);
self.write_line(term::color::GREEN, "[ passed ] ", &test_name);
}
fn file_stem(&self, path: &Path) -> String
{
format!("{}", path.file_stem().unwrap().to_str().unwrap())
}
pub fn warn(&mut self, msg: &String)
{
self.write_line(term::color::YELLOW, " [ warning ] ", msg);
}
pub fn fs_error(&mut self, msg: &str, path: &Path, io_err: &io::Error)
{
self.system_failure(&format!("{}", msg));
self.path(path);
self.error(&format!("{}", io_err));
}
pub fn system_failure(&mut self, msg: &String)
{
self.num_system_failure += 1;
self.write_line(term::color::RED, "[ system error ] ", msg);
}
fn write_line(&mut self, color: color::Color, header: &str, msg: &String)
{
self.write_header(color, header);
self.write_msg(msg.as_str());
self.write_msg("\n");
}
fn write_header(&mut self, color: color::Color, header: &str)
{
self.terminal.fg(color).unwrap();
self.write_msg(header);
self.terminal.reset().unwrap();
}
fn write_msg(&mut self, msg: &str)
{
(write!(self.terminal, "{}", msg)).unwrap();
}
}<|fim▁end|> | };
test.test_directory(&format!("Run and Pass tests of `{}`", grammar.name), |
<|file_name|>picker-global-options.js<|end_file_name|><|fim▁begin|>define(['exports'], function (exports) {<|fim▁hole|> Object.defineProperty(exports, "__esModule", {
value: true
});
var globalExtraOptions = exports.globalExtraOptions = {
mappingDataStructure: {
class: 'class',
content: 'content',
disabled: 'disabled',
divider: 'divider',
groupLabel: 'group',
groupDisabled: 'disabled',
icon: 'icon',
maxOptions: 'maxOptions',
option: 'option',
subtext: 'subtext',
style: 'style',
title: 'title',
tokens: 'tokens'
}
};
var globalPickerOptions = exports.globalPickerOptions = {
dropupAuto: true,
showTick: true,
width: 'auto'
};
});<|fim▁end|> | 'use strict';
|