prompt
large_stringlengths
70
991k
completion
large_stringlengths
0
1.02k
<|file_name|>wish.py<|end_file_name|><|fim▁begin|># # My wish for Pynamite # from __future__ import with_statement from pynamite import * from pynamite.actor import TextBox def scene1(): # define some actors x = TextBox("Pynamite") y = TextBox("Rocks!!!") # tell the first actor to enter enter(x) # wait for a keypress to continue pause()<|fim▁hole|> # fade out one actor while other comes in # # You can use with blocks # with parallel(): # fadeout(1.0,x) # fadein(1.0,y) # Or the functional notation set_var(y, "opacity", 0.0) enter(y) def together(): fadeout(4.0,x) with serial(): linear(y, "opacity", end_val=.5, duration=1.0) linear(y, "opacity", end_val=.0, duration=1.0) linear(y, "opacity", end_val=1.0, duration=2.0) #fadeout(.5,y) #fadein(.5,y) in_parallel(together) # wait for intput pause() # last actor leaves fadeout(1.0,y) pause() # add that scene to the play add_scene(scene1) def scene2(): # define the actor x = TextBox("Yes, it Rocks!!!") # set its opacity to 0.0 set_var(x, "opacity", 0.0) # have it enter (but remember it's still not visible) enter(x) # have it become visible, but in a fancy way smooth(x, "opacity", end_val=.5,duration=.5) smooth(x, "opacity", end_val=.25,duration=.25) smooth(x, "opacity", end_val=.75,duration=.5) smooth(x, "opacity", end_val=.5,duration=.25) smooth(x, "opacity", end_val=1.0,duration=.5) # wait for input pause() # have the actor leave leave() # add this scene add_scene(scene2) # run it run()<|fim▁end|>
<|file_name|>models.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- ######################################################################### # # Copyright (C) 2012 OpenPlans # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ######################################################################### from urlparse import urlparse import httplib2 import urllib import logging from datetime import datetime from lxml import etree from django.conf import settings from django.db import models from django.db.models import signals from django.contrib.auth.models import User from django.contrib.contenttypes.models import ContentType from django.utils.translation import ugettext, ugettext_lazy as _ from django.core.urlresolvers import reverse from geonode import GeoNodeException from geonode.base.models import ResourceBase, ResourceBaseManager, Link, \ resourcebase_post_save, resourcebase_post_delete from geonode.utils import _user, _password, get_wms from geonode.utils import http_client from geonode.geoserver.helpers import cascading_delete from geonode.people.models import Profile from geonode.security.enumerations import AUTHENTICATED_USERS, ANONYMOUS_USERS from geonode.layers.ows import wcs_links, wfs_links, wms_links, \ wps_execute_layer_attribute_statistics from geonode.layers.enumerations import LAYER_ATTRIBUTE_NUMERIC_DATA_TYPES from geonode.utils import ogc_server_settings from geoserver.catalog import Catalog, FailedRequestError from agon_ratings.models import OverallRating logger = logging.getLogger("geonode.layers.models") class Style(models.Model): """Model for storing styles. 
""" name = models.CharField(_('style name'), max_length=255, unique=True) sld_title = models.CharField(max_length=255, null=True, blank=True) sld_body = models.TextField(_('sld text'), null=True, blank=True) sld_version = models.CharField(_('sld version'), max_length=12, null=True, blank=True) sld_url = models.CharField(_('sld url'), null = True, max_length=1000) workspace = models.CharField(max_length=255, null=True, blank=True) def __str__(self): return "%s" % self.name.encode('utf-8') class LayerManager(ResourceBaseManager): def __init__(self): models.Manager.__init__(self) url = ogc_server_settings.rest self.gs_catalog = Catalog(url, _user, _password) def add_bbox_query(q, bbox): '''modify the queryset q to limit to the provided bbox bbox - 4 tuple of floats representing x0,x1,y0,y1 returns the modified query ''' bbox = map(str, bbox) # 2.6 compat - float to decimal conversion q = q.filter(bbox_x0__gte=bbox[0]) q = q.filter(bbox_x1__lte=bbox[1]) q = q.filter(bbox_y0__gte=bbox[2]) return q.filter(bbox_y1__lte=bbox[3]) class Layer(ResourceBase): """ Layer (inherits ResourceBase fields) """ # internal fields objects = LayerManager() workspace = models.CharField(max_length=128) store = models.CharField(max_length=128) storeType = models.CharField(max_length=128) name = models.CharField(max_length=128) typename = models.CharField(max_length=128, unique=True) popular_count = models.IntegerField(default=0) share_count = models.IntegerField(default=0) default_style = models.ForeignKey(Style, related_name='layer_default_style', null=True, blank=True) styles = models.ManyToManyField(Style, related_name='layer_styles') def update_thumbnail(self, save=True): try: self.save_thumbnail(self._thumbnail_url(width=200, height=150), save) except RuntimeError, e: logger.warn('Could not create thumbnail for %s' % self, e) def _render_thumbnail(self, spec): resp, content = http_client.request(spec) if 'ServiceException' in content or resp.status < 200 or resp.status > 299: msg = 'Unable to obtain thumbnail: %s' % content raise RuntimeError(msg) return content def _thumbnail_url(self, width=20, height=None): """ Generate a URL representing thumbnail of the layer """ params = { 'layers': self.typename.encode('utf-8'), 'format': 'image/png8', 'width': width, } if height is not None: params['height'] = height # Avoid using urllib.urlencode here because it breaks the url. # commas and slashes in values get encoded and then cause trouble # with the WMS parser. p = "&".join("%s=%s"%item for item in params.items()) return ogc_server_settings.LOCATION + "wms/reflect?" + p def verify(self): """Makes sure the state of the layer is consistent in GeoServer and Catalogue. 
""" # Check the layer is in the wms get capabilities record # FIXME: Implement caching of capabilities record site wide _local_wms = get_wms() record = _local_wms.contents.get(self.typename) if record is None: msg = "WMS Record missing for layer [%s]" % self.typename.encode('utf-8') raise GeoNodeException(msg) @property def display_type(self): return ({ "dataStore" : "Vector Data", "coverageStore": "Raster Data", }).get(self.storeType, "Data") @property def store_type(self): cat = Layer.objects.gs_catalog res = cat.get_resource(self.name) res.store.fetch() return res.store.dom.find('type').text @property def service_type(self): if self.storeType == 'coverageStore': return "WCS" if self.storeType == 'dataStore': return "WFS" def get_absolute_url(self): return reverse('layer_detail', args=(self.typename,)) def attribute_config(self): #Get custom attribute sort order and labels if any cfg = {} visible_attributes = self.attribute_set.visible() if (visible_attributes.count() > 0): cfg["getFeatureInfo"] = { "fields": [l.attribute for l in visible_attributes], "propertyNames": dict([(l.attribute,l.attribute_label) for l in visible_attributes]) } return cfg def __str__(self): return "%s Layer" % self.typename.encode('utf-8') class Meta: # custom permissions, # change and delete are standard in django permissions = (('view_layer', 'Can view'), ('change_layer_permissions', "Can change permissions"), ) # Permission Level Constants # LEVEL_NONE inherited LEVEL_READ = 'layer_readonly' LEVEL_WRITE = 'layer_readwrite' LEVEL_ADMIN = 'layer_admin' def set_default_permissions(self): self.set_gen_level(ANONYMOUS_USERS, self.LEVEL_READ) self.set_gen_level(AUTHENTICATED_USERS, self.LEVEL_READ) # remove specific user permissions current_perms = self.get_all_level_info() for username in current_perms['users'].keys(): user = User.objects.get(username=username) self.set_user_level(user, self.LEVEL_NONE) # assign owner admin privileges if self.owner: self.set_user_level(self.owner, self.LEVEL_ADMIN) def tiles_url(self): return self.link_set.get(name='Tiles').url def maps(self): from geonode.maps.models import MapLayer return MapLayer.objects.filter(name=self.typename) @property def class_name(self): return self.__class__.__name__ class Layer_Styles(models.Model): layer = models.ForeignKey(Layer) style = models.ForeignKey(Style) class AttributeManager(models.Manager): """Helper class to access filtered attributes """ def visible(self): return self.get_query_set().filter(visible=True).order_by('display_order') class Attribute(models.Model): """ Auxiliary model for storing layer attributes. This helps reduce the need for runtime lookups to GeoServer, and lets users customize attribute titles, sort order, and visibility. 
""" layer = models.ForeignKey(Layer, blank=False, null=False, unique=False, related_name='attribute_set') attribute = models.CharField(_('attribute name'), help_text=_('name of attribute as stored in shapefile/spatial database'), max_length=255, blank=False, null=True, unique=False) description = models.CharField(_('attribute description'), help_text=_('description of attribute to be used in metadata'), max_length=255, blank=True, null=True) attribute_label = models.CharField(_('attribute label'), help_text=_('title of attribute as displayed in GeoNode'), max_length=255, blank=False, null=True, unique=False) attribute_type = models.CharField(_('attribute type'), help_text=_('the data type of the attribute (integer, string, geometry, etc)'), max_length=50, blank=False, null=False, default='xsd:string', unique=False) visible = models.BooleanField(_('visible?'), help_text=_('specifies if the attribute should be displayed in identify results'), default=True) display_order = models.IntegerField(_('display order'), help_text=_('specifies the order in which attribute should be displayed in identify results'), default=1)<|fim▁hole|> min = models.CharField(_('min'), help_text=_('minimum value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA') max = models.CharField(_('max'), help_text=_('maximum value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA') average = models.CharField(_('average'), help_text=_('average value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA') median = models.CharField(_('median'), help_text=_('median value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA') stddev = models.CharField(_('standard deviation'), help_text=_('standard deviation for this field'), max_length=255, blank=False, null=True, unique=False, default='NA') sum = models.CharField(_('sum'), help_text=_('sum value for this field'), max_length=255, blank=False, null=True, unique=False, default='NA') unique_values = models.TextField(_('unique values for this field'), null=True, blank=True, default='NA') last_stats_updated = models.DateTimeField(_('last modified'), default=datetime.now, help_text=_('date when attribute statistics were last updated')) # passing the method itself, not objects = AttributeManager() def __str__(self): return "%s" % self.attribute_label.encode("utf-8") if self.attribute_label else self.attribute.encode("utf-8") def unique_values_as_list(self): return self.unique_values.split(',') def geoserver_pre_delete(instance, sender, **kwargs): """Removes the layer from GeoServer """ ct = ContentType.objects.get_for_model(instance) OverallRating.objects.filter(content_type = ct, object_id = instance.id).delete() #cascading_delete should only be called if ogc_server_settings.BACKEND_WRITE_ENABLED == True if getattr(ogc_server_settings,"BACKEND_WRITE_ENABLED", True): cascading_delete(Layer.objects.gs_catalog, instance.typename) def pre_save_layer(instance, sender, **kwargs): if kwargs.get('raw', False): instance.owner = instance.resourcebase_ptr.owner instance.uuid = instance.resourcebase_ptr.uuid instance.bbox_x0 = instance.resourcebase_ptr.bbox_x0 instance.bbox_x1 = instance.resourcebase_ptr.bbox_x1 instance.bbox_y0 = instance.resourcebase_ptr.bbox_y0 instance.bbox_y1 = instance.resourcebase_ptr.bbox_y1 if instance.abstract == '' or instance.abstract is None: instance.abstract = 'No abstract provided' if instance.title == '' or instance.title is None: 
instance.title = instance.name def pre_delete_layer(instance, sender, **kwargs): """ Remove any associated style to the layer, if it is not used by other layers. Default style will be deleted in post_delete_layer """ logger.debug("Going to delete the styles associated for [%s]", instance.typename.encode('utf-8')) default_style = instance.default_style for style in instance.styles.all(): if style.layer_styles.all().count()==1: if style != default_style: style.delete() def post_delete_layer(instance, sender, **kwargs): """ Removed the layer from any associated map, if any. Remove the layer default style. """ from geonode.maps.models import MapLayer logger.debug("Going to delete associated maplayers for [%s]", instance.typename.encode('utf-8')) MapLayer.objects.filter(name=instance.typename).delete() logger.debug("Going to delete the default style for [%s]", instance.typename.encode('utf-8')) if instance.default_style and Layer.objects.filter(default_style__id=instance.default_style.id).count() == 0: instance.default_style.delete() def geoserver_pre_save(instance, sender, **kwargs): """Send information to geoserver. The attributes sent include: * Title * Abstract * Name * Keywords * Metadata Links, * Point of Contact name and url """ url = ogc_server_settings.internal_rest try: gs_catalog = Catalog(url, _user, _password) gs_resource = gs_catalog.get_resource(instance.name) except (EnvironmentError, FailedRequestError) as e: gs_resource = None msg = ('Could not connect to geoserver at "%s"' 'to save information for layer "%s"' % ( ogc_server_settings.LOCATION, instance.name.encode('utf-8')) ) logger.warn(msg, e) # If geoserver is not online, there is no need to continue return # If there is no resource returned it could mean one of two things: # a) There is a synchronization problem in geoserver # b) The unit tests are running and another geoserver is running in the # background. # For both cases it is sensible to stop processing the layer if gs_resource is None: logger.warn('Could not get geoserver resource for %s' % instance) return gs_resource.title = instance.title gs_resource.abstract = instance.abstract gs_resource.name= instance.name # Get metadata links metadata_links = [] for link in instance.link_set.metadata(): metadata_links.append((link.name, link.mime, link.url)) gs_resource.metadata_links = metadata_links #gs_resource should only be called if ogc_server_settings.BACKEND_WRITE_ENABLED == True if getattr(ogc_server_settings,"BACKEND_WRITE_ENABLED", True): gs_catalog.save(gs_resource) gs_layer = gs_catalog.get_layer(instance.name) if instance.poc and instance.poc.user: gs_layer.attribution = str(instance.poc.user) profile = Profile.objects.get(user=instance.poc.user) gs_layer.attribution_link = settings.SITEURL[:-1] + profile.get_absolute_url() #gs_layer should only be called if ogc_server_settings.BACKEND_WRITE_ENABLED == True if getattr(ogc_server_settings,"BACKEND_WRITE_ENABLED", True): gs_catalog.save(gs_layer) """Get information from geoserver. 
The attributes retrieved include: * Bounding Box * SRID * Download links (WMS, WCS or WFS and KML) * Styles (SLD) """ gs_resource = gs_catalog.get_resource(instance.name) bbox = gs_resource.latlon_bbox #FIXME(Ariel): Correct srid setting below #self.srid = gs_resource.src # Set bounding box values instance.bbox_x0 = bbox[0] instance.bbox_x1 = bbox[1] instance.bbox_y0 = bbox[2] instance.bbox_y1 = bbox[3] instance.update_thumbnail(save=False) def geoserver_post_save(instance, sender, **kwargs): """Save keywords to GeoServer The way keywords are implemented requires the layer to be saved to the database before accessing them. """ url = ogc_server_settings.internal_rest try: gs_catalog = Catalog(url, _user, _password) gs_resource = gs_catalog.get_resource(instance.name) except (FailedRequestError, EnvironmentError) as e: msg = ('Could not connect to geoserver at "%s"' 'to save information for layer "%s"' % ( ogc_server_settings.LOCATION, instance.name.encode('utf-8')) ) logger.warn(msg, e) # If geoserver is not online, there is no need to continue return # If there is no resource returned it could mean one of two things: # a) There is a synchronization problem in geoserver # b) The unit tests are running and another geoserver is running in the # background. # For both cases it is sensible to stop processing the layer if gs_resource is None: logger.warn('Could not get geoserver resource for %s' % instance) return gs_resource.keywords = instance.keyword_list() #gs_resource should only be called if ogc_server_settings.BACKEND_WRITE_ENABLED == True if getattr(ogc_server_settings,"BACKEND_WRITE_ENABLED", True): gs_catalog.save(gs_resource) bbox = gs_resource.latlon_bbox dx = float(bbox[1]) - float(bbox[0]) dy = float(bbox[3]) - float(bbox[2]) dataAspect = 1 if dy == 0 else dx / dy height = 550 width = int(height * dataAspect) # Set download links for WMS, WCS or WFS and KML links = wms_links(ogc_server_settings.public_url + 'wms?', instance.typename.encode('utf-8'), instance.bbox_string, instance.srid, height, width) for ext, name, mime, wms_url in links: Link.objects.get_or_create(resource= instance.resourcebase_ptr, name=ugettext(name), defaults=dict( extension=ext, url=wms_url, mime=mime, link_type='image', ) ) if instance.storeType == "dataStore": links = wfs_links(ogc_server_settings.public_url + 'wfs?', instance.typename.encode('utf-8')) for ext, name, mime, wfs_url in links: Link.objects.get_or_create(resource= instance.resourcebase_ptr, url=wfs_url, defaults=dict( extension=ext, name=name, mime=mime, url=wfs_url, link_type='data', ) ) elif instance.storeType == 'coverageStore': #FIXME(Ariel): This works for public layers, does it work for restricted too? # would those end up with no geotiff links, like, forever? permissions = {} permissions['anonymous'] = instance.get_gen_level(ANONYMOUS_USERS) permissions['authenticated'] = instance.get_gen_level(AUTHENTICATED_USERS) instance.set_gen_level(ANONYMOUS_USERS,'layer_readonly') links = wcs_links(ogc_server_settings.public_url + 'wcs?', instance.typename.encode('utf-8'), bbox=instance.bbox[:-1], crs=instance.bbox[-1], height=height, width=width) for ext, name, mime, wcs_url in links: Link.objects.get_or_create(resource= instance.resourcebase_ptr, url=wcs_url, defaults=dict( extension=ext, name=name, mime=mime, link_type='data', ) ) instance.set_gen_level(ANONYMOUS_USERS,permissions['anonymous']) instance.set_gen_level(AUTHENTICATED_USERS,permissions['authenticated']) kml_reflector_link_download = ogc_server_settings.public_url + "wms/kml?" 
+ urllib.urlencode({ 'layers': instance.typename.encode('utf-8'), 'mode': "download" }) Link.objects.get_or_create(resource= instance.resourcebase_ptr, url=kml_reflector_link_download, defaults=dict( extension='kml', name=_("KML"), mime='text/xml', link_type='data', ) ) kml_reflector_link_view = ogc_server_settings.public_url + "wms/kml?" + urllib.urlencode({ 'layers': instance.typename.encode('utf-8'), 'mode': "refresh" }) Link.objects.get_or_create(resource= instance.resourcebase_ptr, url=kml_reflector_link_view, defaults=dict( extension='kml', name=_("View in Google Earth"), mime='text/xml', link_type='data', ) ) tile_url = ('%sgwc/service/gmaps?' % ogc_server_settings.public_url + 'layers=%s' % instance.typename.encode('utf-8') + '&zoom={z}&x={x}&y={y}' + '&format=image/png8' ) Link.objects.get_or_create(resource= instance.resourcebase_ptr, url=tile_url, defaults=dict( extension='tiles', name=_("Tiles"), mime='image/png', link_type='image', ) ) html_link_url = '%s%s' % (settings.SITEURL[:-1], instance.get_absolute_url()) Link.objects.get_or_create(resource= instance.resourcebase_ptr, url=html_link_url, defaults=dict( extension='html', name=instance.typename, mime='text/html', link_type='html', ) ) #remove links that belong to and old address for link in instance.link_set.all(): if not urlparse(settings.SITEURL).hostname == urlparse(link.url).hostname and not \ urlparse(ogc_server_settings.public_url).hostname == urlparse(link.url).hostname: link.delete() #Save layer attributes set_attributes(instance) #Save layer styles set_styles(instance, gs_catalog) def set_styles(layer, gs_catalog): style_set = [] gs_layer = gs_catalog.get_layer(layer.name) default_style = gs_layer.default_style layer.default_style = save_style(default_style) style_set.append(layer.default_style) alt_styles = gs_layer.styles for alt_style in alt_styles: style_set.append(save_style(alt_style)) layer.styles = style_set return layer def save_style(gs_style): style, created = Style.objects.get_or_create(name = gs_style.sld_name) style.sld_title = gs_style.sld_title style.sld_body = gs_style.sld_body style.sld_url = gs_style.body_href() style.save() return style def is_layer_attribute_aggregable(store_type, field_name, field_type): """ Decipher whether layer attribute is suitable for statistical derivation """ # must be vector layer if store_type != 'dataStore': return False # must be a numeric data type if field_type not in LAYER_ATTRIBUTE_NUMERIC_DATA_TYPES: return False # must not be an identifier type field if field_name.lower() in ['id', 'identifier']: return False return True def get_attribute_statistics(layer_name, field): """ Generate statistics (range, mean, median, standard deviation, unique values) for layer attribute """ logger.debug('Deriving aggregate statistics for attribute %s', field) if not ogc_server_settings.WPS_ENABLED: return None try: return wps_execute_layer_attribute_statistics(layer_name, field) except Exception: logger.exception('Error generating layer aggregate statistics') def set_attributes(layer, overwrite=False): """ Retrieve layer attribute names & types from Geoserver, then store in GeoNode database using Attribute model """ #Appending authorizations seems necessary to avoid 'layer not found' from GeoServer http = httplib2.Http() http.add_credentials(_user, _password) _netloc = urlparse(ogc_server_settings.LOCATION).netloc http.authorizations.append( httplib2.BasicAuthentication( (_user, _password), _netloc, ogc_server_settings.LOCATION, {}, None, None, http ) ) attribute_map = [] if 
layer.storeType == "dataStore": dft_url = ogc_server_settings.LOCATION + "wfs?" + urllib.urlencode({ "service": "wfs", "version": "1.0.0", "request": "DescribeFeatureType", "typename": layer.typename.encode('utf-8'), }) try: body = http.request(dft_url)[1] doc = etree.fromstring(body) path = ".//{xsd}extension/{xsd}sequence/{xsd}element".format(xsd="{http://www.w3.org/2001/XMLSchema}") attribute_map = [[n.attrib["name"],n.attrib["type"]] for n in doc.findall(path)] except Exception: attribute_map = [] elif layer.storeType == "coverageStore": dc_url = ogc_server_settings.LOCATION + "wcs?" + urllib.urlencode({ "service": "wcs", "version": "1.1.0", "request": "DescribeCoverage", "identifiers": layer.typename.encode('utf-8') }) try: response, body = http.request(dc_url) doc = etree.fromstring(body) path = ".//{wcs}Axis/{wcs}AvailableKeys/{wcs}Key".format(wcs="{http://www.opengis.net/wcs/1.1.1}") attribute_map = [[n.text,"raster"] for n in doc.findall(path)] except Exception: attribute_map = [] attributes = layer.attribute_set.all() # Delete existing attributes if they no longer exist in an updated layer for la in attributes: lafound = False for field, ftype in attribute_map: if field == la.attribute: lafound = True if overwrite or not lafound: logger.debug("Going to delete [%s] for [%s]", la.attribute, layer.name.encode('utf-8')) la.delete() # Add new layer attributes if they don't already exist if attribute_map is not None: iter = len(Attribute.objects.filter(layer=layer)) + 1 for field, ftype in attribute_map: if field is not None: la, created = Attribute.objects.get_or_create(layer=layer, attribute=field, attribute_type=ftype) if created: if is_layer_attribute_aggregable(layer.storeType, field, ftype): logger.debug("Generating layer attribute statistics") result = get_attribute_statistics(layer.name, field) if result is not None: la.count = result['Count'] la.min = result['Min'] la.max = result['Max'] la.average = result['Average'] la.median = result['Median'] la.stddev = result['StandardDeviation'] la.sum = result['Sum'] la.unique_values = result['unique_values'] la.last_stats_updated = datetime.now() la.attribute_label = field.title() la.visible = ftype.find("gml:") != 0 la.display_order = iter la.save() iter += 1 logger.debug("Created [%s] attribute for [%s]", field, layer.name.encode('utf-8')) else: logger.debug("No attributes found") signals.pre_save.connect(pre_save_layer, sender=Layer) signals.pre_save.connect(geoserver_pre_save, sender=Layer) signals.pre_delete.connect(geoserver_pre_delete, sender=Layer) signals.post_save.connect(geoserver_post_save, sender=Layer) signals.pre_delete.connect(pre_delete_layer, sender=Layer) signals.post_delete.connect(post_delete_layer, sender=Layer) signals.post_save.connect(resourcebase_post_save, sender=Layer) signals.post_delete.connect(resourcebase_post_delete, sender=Layer)<|fim▁end|>
# statistical derivations count = models.IntegerField(_('count'), help_text=_('count value for this field'), default=1)
<|file_name|>MessagesComp.tsx<|end_file_name|><|fim▁begin|>import React from 'react' import { CLOSE_CHARACTER } from '../model/constants'<|fim▁hole|> export interface MessagesCompProps { _messages: readonly string[] _removeMessageByIndex: (index: number) => void } export function MessagesComp({ _messages, _removeMessageByIndex, }: MessagesCompProps) { return ( <> {_messages.map((message, index) => ( <div key={index} className={styles.message}> <div className={styles.content}>{message}</div> <button type='button' className={styles.button} onClick={() => { _removeMessageByIndex(index) }} > {CLOSE_CHARACTER} </button> </div> ))} </> ) }<|fim▁end|>
import styles from './MessagesComp.module.css'
<|file_name|>form.ts<|end_file_name|><|fim▁begin|>export type FormItem = { label?: string; type: 'string'; default: string | null; hidden?: boolean; multiline?: boolean; } | { label?: string; type: 'number'; default: number | null; hidden?: boolean; step?: number; } | { label?: string; type: 'boolean'; default: boolean | null; hidden?: boolean; } | { label?: string; type: 'enum'; default: string | null; hidden?: boolean; enum: string[]; } | {<|fim▁hole|> default: unknown[] | null; hidden?: boolean; }; export type Form = Record<string, FormItem>;<|fim▁end|>
label?: string; type: 'array';
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>pub mod vm;<|fim▁hole|>pub mod program;<|fim▁end|>
pub mod command; pub mod parse;
<|file_name|>build.py<|end_file_name|><|fim▁begin|>import os.path from askapdev.rbuild.builders import CMake as Builder import askapdev.rbuild.utils as utils # CMake doesn't know about ROOT_DIR for blas and lapack, so need to # explicitly name them. Want to use the dynamic libraries in order # to avoid link problems with missing FORTRAN symbols. platform = utils.get_platform() libblas = "libblas.so" liblapack = "liblapack.so" if platform['system'] == 'Darwin': libblas = libblas.replace(".so", ".dylib") liblapack = liblapack.replace(".so", ".dylib") builder = Builder() builder.remote_archive = "casacore-1.6.0a.tar.bz2" cfitsio = builder.dep.get_install_path("cfitsio") wcslib = builder.dep.get_install_path("wcslib") blas = builder.dep.get_install_path("blas") lapack = builder.dep.get_install_path("lapack") fftw3 = builder.dep.get_install_path("fftw3") # CMake doesn't know about ROOT_DIR for these packages, so be explicit builder.add_option("-DBLAS_LIBRARIES=%s" % os.path.join(blas, 'lib', libblas)) builder.add_option("-DLAPACK_LIBRARIES=%s" % os.path.join(lapack, 'lib', liblapack))<|fim▁hole|>builder.add_option("-DWCSLIB_ROOT_DIR=%s" % wcslib) # but FFTW3_ROOT_DIR don't for the include part builder.add_option("-DFFTW3_DISABLE_THREADS=ON") builder.add_option("-DFFTW3_ROOT_DIR=%s" % fftw3) builder.add_option("-DFFTW3_INCLUDE_DIRS=%s/include" % fftw3) builder.add_option("-DUSE_FFTW3=ON") # save some time builder.add_option("-DBUILD_TESTING=OFF") builder.nowarnings = True # Force use of raw GNU compilers. This is due to bug #5798 soon on the Cray XC30. # Builds using the newer cmake (2.8.12) fail when cmake uses the Cray compiler # wrappers builder.add_option("-DCMAKE_C_COMPILER=gcc") builder.add_option("-DCMAKE_CXX_COMPILER=g++") builder.build()<|fim▁end|>
# these work builder.add_option("-DCFITSIO_ROOT_DIR=%s" % cfitsio)
<|file_name|>RxBus.java<|end_file_name|><|fim▁begin|><|fim▁hole|>import java.util.HashMap; import java.util.HashSet; import java.util.Map; import android.support.annotation.NonNull; import android.support.annotation.VisibleForTesting; import android.support.v4.util.Pair; import rx.Subscriber; import rx.Subscription; import rx.subjects.PublishSubject; import rx.subjects.SerializedSubject; import rx.subjects.Subject; public /* final */ class RxBus { private static final ThreadLocal<RxBus> INSTANCE = new ThreadLocal<>(); private final Map<Pair<Class, Subscriber>, Subscription> subscriptions = new HashMap<>(); private final Subject<Object, ?> bus = new SerializedSubject<>(PublishSubject.create()); public static RxBus getInstance() { final RxBus instance = RxBus.INSTANCE.get(); if (instance == null) { RxBus.INSTANCE.set(new RxBus()); return RxBus.INSTANCE.get(); } return instance; } @VisibleForTesting RxBus() { } public <T> void register(@NonNull final Class<T> eventType, @NonNull final Subscriber<T> subscriber) { final Pair<Class, Subscriber> key = Pair.create(eventType, subscriber); if (this.subscriptions.containsKey(key)) throw new IllegalArgumentException("The given subscriber is already registered"); this.subscriptions.put(key, this.bus.filter(event -> event != null && event.getClass().equals(eventType)).subscribe(value -> subscriber.onNext((T)value))); } public <T> void unregister(@NonNull final Class<T> eventType, @NonNull final Subscriber<T> subscriber) { final Pair<Class, Subscriber> key = Pair.create(eventType, subscriber); if (this.subscriptions.containsKey(key)) this.subscriptions.remove(key).unsubscribe(); } public void unregisterAll() { for (final Pair<Class, Subscriber> pair : new HashSet<>(this.subscriptions.keySet())) { this.unregister(pair.first, pair.second); } } public <T> void send(@NonNull final T event) { if (!this.subscriptions.isEmpty()) this.bus.onNext(event); } }<|fim▁end|>
package com.github.ayltai.foscam.client;
<|file_name|>registry_test.go<|end_file_name|><|fim▁begin|>package registry import ( "fmt" "net/http" "net/http/httputil" "net/url" "strings" "testing" "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/reference" "github.com/docker/engine-api/types" registrytypes "github.com/docker/engine-api/types/registry" ) var ( token = []string{"fake-token"} ) const ( imageID = "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d" REPO = "foo42/bar" ) func spawnTestRegistrySession(t *testing.T) *Session { authConfig := &types.AuthConfig{} endpoint, err := NewV1Endpoint(makeIndex("/v1/"), "", nil) if err != nil { t.Fatal(err) } userAgent := "docker test client" var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log} tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(userAgent, nil)...) client := HTTPClient(tr) r, err := NewSession(client, authConfig, endpoint) if err != nil { t.Fatal(err) } // In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true` // header while authenticating, in order to retrieve a token that can be later used to // perform authenticated actions. // // The mock v1 registry does not support that, (TODO(tiborvass): support it), instead, // it will consider authenticated any request with the header `X-Docker-Token: fake-token`. // // Because we know that the client's transport is an `*authTransport` we simply cast it, // in order to set the internal cached token to the fake token, and thus send that fake token // upon every subsequent requests. r.client.Transport.(*authTransport).token = token return r } func TestPingRegistryEndpoint(t *testing.T) { testPing := func(index *registrytypes.IndexInfo, expectedStandalone bool, assertMessage string) { ep, err := NewV1Endpoint(index, "", nil) if err != nil { t.Fatal(err) } regInfo, err := ep.Ping() if err != nil { t.Fatal(err) } assertEqual(t, regInfo.Standalone, expectedStandalone, assertMessage) } testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)") testPing(makePublicIndex(), false, "Expected standalone to be false for public index") } func TestEndpoint(t *testing.T) { // Simple wrapper to fail test if err != nil expandEndpoint := func(index *registrytypes.IndexInfo) *V1Endpoint { endpoint, err := NewV1Endpoint(index, "", nil) if err != nil { t.Fatal(err) } return endpoint } assertInsecureIndex := func(index *registrytypes.IndexInfo) { index.Secure = true _, err := NewV1Endpoint(index, "", nil) assertNotEqual(t, err, nil, index.Name+": Expected error for insecure index") assertEqual(t, strings.Contains(err.Error(), "insecure-registry"), true, index.Name+": Expected insecure-registry error for insecure index") index.Secure = false } assertSecureIndex := func(index *registrytypes.IndexInfo) { index.Secure = true _, err := NewV1Endpoint(index, "", nil) assertNotEqual(t, err, nil, index.Name+": Expected cert error for secure index") assertEqual(t, strings.Contains(err.Error(), "certificate signed by unknown authority"), true, index.Name+": Expected cert error for secure index") index.Secure = false } index := &registrytypes.IndexInfo{} index.Name = makeURL("/v1/") endpoint := expandEndpoint(index) assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) assertInsecureIndex(index) index.Name = makeURL("") endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), index.Name+"/v1/", 
index.Name+": Expected endpoint to be "+index.Name+"/v1/") assertInsecureIndex(index) httpURL := makeURL("") index.Name = strings.SplitN(httpURL, "://", 2)[1] endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") assertInsecureIndex(index) index.Name = makeHTTPSURL("/v1/") endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) assertSecureIndex(index) index.Name = makeHTTPSURL("") endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") assertSecureIndex(index) httpsURL := makeHTTPSURL("") index.Name = strings.SplitN(httpsURL, "://", 2)[1] endpoint = expandEndpoint(index) assertEqual(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/") assertSecureIndex(index) badEndpoints := []string{ "http://127.0.0.1/v1/", "https://127.0.0.1/v1/", "http://127.0.0.1", "https://127.0.0.1", "127.0.0.1", } for _, address := range badEndpoints { index.Name = address _, err := NewV1Endpoint(index, "", nil) checkNotEqual(t, err, nil, "Expected error while expanding bad endpoint") } } func TestGetRemoteHistory(t *testing.T) { r := spawnTestRegistrySession(t) hist, err := r.GetRemoteHistory(imageID, makeURL("/v1/")) if err != nil { t.Fatal(err) } assertEqual(t, len(hist), 2, "Expected 2 images in history") assertEqual(t, hist[0], imageID, "Expected "+imageID+"as first ancestry") assertEqual(t, hist[1], "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", "Unexpected second ancestry") } func TestLookupRemoteImage(t *testing.T) { r := spawnTestRegistrySession(t) err := r.LookupRemoteImage(imageID, makeURL("/v1/")) assertEqual(t, err, nil, "Expected error of remote lookup to nil") if err := r.LookupRemoteImage("abcdef", makeURL("/v1/")); err == nil { t.Fatal("Expected error of remote lookup to not nil") } } func TestGetRemoteImageJSON(t *testing.T) { r := spawnTestRegistrySession(t) json, size, err := r.GetRemoteImageJSON(imageID, makeURL("/v1/")) if err != nil { t.Fatal(err) } assertEqual(t, size, int64(154), "Expected size 154") if len(json) == 0 { t.Fatal("Expected non-empty json") } _, _, err = r.GetRemoteImageJSON("abcdef", makeURL("/v1/")) if err == nil { t.Fatal("Expected image not found error") } } func TestGetRemoteImageLayer(t *testing.T) { r := spawnTestRegistrySession(t) data, err := r.GetRemoteImageLayer(imageID, makeURL("/v1/"), 0) if err != nil { t.Fatal(err) } if data == nil { t.Fatal("Expected non-nil data result") } _, err = r.GetRemoteImageLayer("abcdef", makeURL("/v1/"), 0) if err == nil { t.Fatal("Expected image not found error") } } func TestGetRemoteTag(t *testing.T) { r := spawnTestRegistrySession(t) repoRef, err := reference.ParseNamed(REPO) if err != nil { t.Fatal(err) } tag, err := r.GetRemoteTag([]string{makeURL("/v1/")}, repoRef, "test") if err != nil { t.Fatal(err) } assertEqual(t, tag, imageID, "Expected tag test to map to "+imageID) bazRef, err := reference.ParseNamed("foo42/baz") if err != nil { t.Fatal(err) } _, err = r.GetRemoteTag([]string{makeURL("/v1/")}, bazRef, "foo") if err != ErrRepoNotFound { t.Fatal("Expected ErrRepoNotFound error when fetching tag for bogus repo") } } func TestGetRemoteTags(t *testing.T) { r := spawnTestRegistrySession(t) repoRef, err := reference.ParseNamed(REPO) if err != nil { t.Fatal(err) } tags, err := r.GetRemoteTags([]string{makeURL("/v1/")}, repoRef) if err 
!= nil { t.Fatal(err) } assertEqual(t, len(tags), 2, "Expected two tags") assertEqual(t, tags["latest"], imageID, "Expected tag latest to map to "+imageID) assertEqual(t, tags["test"], imageID, "Expected tag test to map to "+imageID) bazRef, err := reference.ParseNamed("foo42/baz") if err != nil { t.Fatal(err) } _, err = r.GetRemoteTags([]string{makeURL("/v1/")}, bazRef) if err != ErrRepoNotFound { t.Fatal("Expected ErrRepoNotFound error when fetching tags for bogus repo") } } func TestGetRepositoryData(t *testing.T) { r := spawnTestRegistrySession(t) parsedURL, err := url.Parse(makeURL("/v1/")) if err != nil { t.Fatal(err) } host := "http://" + parsedURL.Host + "/v1/" repoRef, err := reference.ParseNamed(REPO) if err != nil { t.Fatal(err) } data, err := r.GetRepositoryData(repoRef) if err != nil { t.Fatal(err) } assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList") assertEqual(t, len(data.Endpoints), 2, fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints))) assertEqual(t, data.Endpoints[0], host, fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0])) assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/", fmt.Sprintf("Expected first endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1])) } func TestPushImageJSONRegistry(t *testing.T) { r := spawnTestRegistrySession(t) imgData := &ImgData{ ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", } err := r.PushImageJSONRegistry(imgData, []byte{0x42, 0xdf, 0x0}, makeURL("/v1/")) if err != nil { t.Fatal(err) } } func TestPushImageLayerRegistry(t *testing.T) { r := spawnTestRegistrySession(t) layer := strings.NewReader("") _, _, err := r.PushImageLayerRegistry(imageID, layer, makeURL("/v1/"), []byte{}) if err != nil { t.Fatal(err) } } func TestParseRepositoryInfo(t *testing.T) { type staticRepositoryInfo struct { Index *registrytypes.IndexInfo RemoteName string CanonicalName string LocalName string Official bool } expectedRepoInfos := map[string]staticRepositoryInfo{ "fooo/bar": { Index: &registrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "fooo/bar", LocalName: "fooo/bar", CanonicalName: "docker.io/fooo/bar", Official: false, }, "library/ubuntu": { Index: &registrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "library/ubuntu", LocalName: "ubuntu", CanonicalName: "docker.io/library/ubuntu", Official: true, }, "nonlibrary/ubuntu": { Index: &registrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "nonlibrary/ubuntu", LocalName: "nonlibrary/ubuntu", CanonicalName: "docker.io/nonlibrary/ubuntu", Official: false, }, "ubuntu": { Index: &registrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "library/ubuntu", LocalName: "ubuntu", CanonicalName: "docker.io/library/ubuntu", Official: true, }, "other/library": { Index: &registrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "other/library", LocalName: "other/library", CanonicalName: "docker.io/other/library", Official: false, }, "127.0.0.1:8000/private/moonbase": { Index: &registrytypes.IndexInfo{ Name: "127.0.0.1:8000", Official: false, }, RemoteName: "private/moonbase", LocalName: "127.0.0.1:8000/private/moonbase", CanonicalName: "127.0.0.1:8000/private/moonbase", Official: false, }, "127.0.0.1:8000/privatebase": { Index: &registrytypes.IndexInfo{ Name: "127.0.0.1:8000", Official: 
false, }, RemoteName: "privatebase", LocalName: "127.0.0.1:8000/privatebase", CanonicalName: "127.0.0.1:8000/privatebase", Official: false, }, "localhost:8000/private/moonbase": { Index: &registrytypes.IndexInfo{ Name: "localhost:8000", Official: false, }, RemoteName: "private/moonbase", LocalName: "localhost:8000/private/moonbase", CanonicalName: "localhost:8000/private/moonbase", Official: false, }, "localhost:8000/privatebase": { Index: &registrytypes.IndexInfo{ Name: "localhost:8000", Official: false, }, RemoteName: "privatebase", LocalName: "localhost:8000/privatebase", CanonicalName: "localhost:8000/privatebase", Official: false, }, "example.com/private/moonbase": { Index: &registrytypes.IndexInfo{ Name: "example.com", Official: false, }, RemoteName: "private/moonbase", LocalName: "example.com/private/moonbase", CanonicalName: "example.com/private/moonbase", Official: false, }, "example.com/privatebase": { Index: &registrytypes.IndexInfo{ Name: "example.com", Official: false, }, RemoteName: "privatebase", LocalName: "example.com/privatebase", CanonicalName: "example.com/privatebase", Official: false, }, "example.com:8000/private/moonbase": { Index: &registrytypes.IndexInfo{ Name: "example.com:8000", Official: false, }, RemoteName: "private/moonbase", LocalName: "example.com:8000/private/moonbase", CanonicalName: "example.com:8000/private/moonbase", Official: false, }, "example.com:8000/privatebase": { Index: &registrytypes.IndexInfo{ Name: "example.com:8000", Official: false, }, RemoteName: "privatebase", LocalName: "example.com:8000/privatebase", CanonicalName: "example.com:8000/privatebase", Official: false, }, "localhost/private/moonbase": { Index: &registrytypes.IndexInfo{ Name: "localhost", Official: false, }, RemoteName: "private/moonbase", LocalName: "localhost/private/moonbase", CanonicalName: "localhost/private/moonbase", Official: false, }, "localhost/privatebase": { Index: &registrytypes.IndexInfo{ Name: "localhost", Official: false, }, RemoteName: "privatebase", LocalName: "localhost/privatebase", CanonicalName: "localhost/privatebase", Official: false, }, IndexName + "/public/moonbase": { Index: &registrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "public/moonbase", LocalName: "public/moonbase", CanonicalName: "docker.io/public/moonbase", Official: false, }, "index." + IndexName + "/public/moonbase": { Index: &registrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "public/moonbase", LocalName: "public/moonbase", CanonicalName: "docker.io/public/moonbase", Official: false, }, "ubuntu-12.04-base": { Index: &registrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "library/ubuntu-12.04-base", LocalName: "ubuntu-12.04-base", CanonicalName: "docker.io/library/ubuntu-12.04-base", Official: true, }, IndexName + "/ubuntu-12.04-base": { Index: &registrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "library/ubuntu-12.04-base", LocalName: "ubuntu-12.04-base", CanonicalName: "docker.io/library/ubuntu-12.04-base", Official: true, }, "index." 
+ IndexName + "/ubuntu-12.04-base": { Index: &registrytypes.IndexInfo{ Name: IndexName, Official: true, }, RemoteName: "library/ubuntu-12.04-base", LocalName: "ubuntu-12.04-base", CanonicalName: "docker.io/library/ubuntu-12.04-base", Official: true, }, } for reposName, expectedRepoInfo := range expectedRepoInfos { named, err := reference.WithName(reposName) if err != nil { t.Error(err) } repoInfo, err := ParseRepositoryInfo(named) if err != nil { t.Error(err) } else { checkEqual(t, repoInfo.Index.Name, expectedRepoInfo.Index.Name, reposName) checkEqual(t, repoInfo.RemoteName(), expectedRepoInfo.RemoteName, reposName) checkEqual(t, repoInfo.Name(), expectedRepoInfo.LocalName, reposName) checkEqual(t, repoInfo.FullName(), expectedRepoInfo.CanonicalName, reposName) checkEqual(t, repoInfo.Index.Official, expectedRepoInfo.Index.Official, reposName) checkEqual(t, repoInfo.Official, expectedRepoInfo.Official, reposName) } } } func TestNewIndexInfo(t *testing.T) { testIndexInfo := func(config *serviceConfig, expectedIndexInfos map[string]*registrytypes.IndexInfo) { for indexName, expectedIndexInfo := range expectedIndexInfos { index, err := newIndexInfo(config, indexName) if err != nil { t.Fatal(err) } else { checkEqual(t, index.Name, expectedIndexInfo.Name, indexName+" name")<|fim▁hole|> checkEqual(t, index.Official, expectedIndexInfo.Official, indexName+" is official") checkEqual(t, index.Secure, expectedIndexInfo.Secure, indexName+" is secure") checkEqual(t, len(index.Mirrors), len(expectedIndexInfo.Mirrors), indexName+" mirrors") } } } config := newServiceConfig(ServiceOptions{}) noMirrors := []string{} expectedIndexInfos := map[string]*registrytypes.IndexInfo{ IndexName: { Name: IndexName, Official: true, Secure: true, Mirrors: noMirrors, }, "index." + IndexName: { Name: IndexName, Official: true, Secure: true, Mirrors: noMirrors, }, "example.com": { Name: "example.com", Official: false, Secure: true, Mirrors: noMirrors, }, "127.0.0.1:5000": { Name: "127.0.0.1:5000", Official: false, Secure: false, Mirrors: noMirrors, }, } testIndexInfo(config, expectedIndexInfos) publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"} config = makeServiceConfig(publicMirrors, []string{"example.com"}) expectedIndexInfos = map[string]*registrytypes.IndexInfo{ IndexName: { Name: IndexName, Official: true, Secure: true, Mirrors: publicMirrors, }, "index." 
+ IndexName: { Name: IndexName, Official: true, Secure: true, Mirrors: publicMirrors, }, "example.com": { Name: "example.com", Official: false, Secure: false, Mirrors: noMirrors, }, "example.com:5000": { Name: "example.com:5000", Official: false, Secure: true, Mirrors: noMirrors, }, "127.0.0.1": { Name: "127.0.0.1", Official: false, Secure: false, Mirrors: noMirrors, }, "127.0.0.1:5000": { Name: "127.0.0.1:5000", Official: false, Secure: false, Mirrors: noMirrors, }, "other.com": { Name: "other.com", Official: false, Secure: true, Mirrors: noMirrors, }, } testIndexInfo(config, expectedIndexInfos) config = makeServiceConfig(nil, []string{"42.42.0.0/16"}) expectedIndexInfos = map[string]*registrytypes.IndexInfo{ "example.com": { Name: "example.com", Official: false, Secure: false, Mirrors: noMirrors, }, "example.com:5000": { Name: "example.com:5000", Official: false, Secure: false, Mirrors: noMirrors, }, "127.0.0.1": { Name: "127.0.0.1", Official: false, Secure: false, Mirrors: noMirrors, }, "127.0.0.1:5000": { Name: "127.0.0.1:5000", Official: false, Secure: false, Mirrors: noMirrors, }, "other.com": { Name: "other.com", Official: false, Secure: true, Mirrors: noMirrors, }, } testIndexInfo(config, expectedIndexInfos) } func TestMirrorEndpointLookup(t *testing.T) { containsMirror := func(endpoints []APIEndpoint) bool { for _, pe := range endpoints { if pe.URL.Host == "my.mirror" { return true } } return false } s := DefaultService{config: makeServiceConfig([]string{"my.mirror"}, nil)} imageName, err := reference.WithName(IndexName + "/test/image") if err != nil { t.Error(err) } pushAPIEndpoints, err := s.LookupPushEndpoints(imageName.Hostname()) if err != nil { t.Fatal(err) } if containsMirror(pushAPIEndpoints) { t.Fatal("Push endpoint should not contain mirror") } pullAPIEndpoints, err := s.LookupPullEndpoints(imageName.Hostname()) if err != nil { t.Fatal(err) } if !containsMirror(pullAPIEndpoints) { t.Fatal("Pull endpoint should contain mirror") } } func TestPushRegistryTag(t *testing.T) { r := spawnTestRegistrySession(t) repoRef, err := reference.ParseNamed(REPO) if err != nil { t.Fatal(err) } err = r.PushRegistryTag(repoRef, imageID, "stable", makeURL("/v1/")) if err != nil { t.Fatal(err) } } func TestPushImageJSONIndex(t *testing.T) { r := spawnTestRegistrySession(t) imgData := []*ImgData{ { ID: "77dbf71da1d00e3fbddc480176eac8994025630c6590d11cfc8fe1209c2a1d20", Checksum: "sha256:1ac330d56e05eef6d438586545ceff7550d3bdcb6b19961f12c5ba714ee1bb37", }, { ID: "42d718c941f5c532ac049bf0b0ab53f0062f09a03afd4aa4a02c098e46032b9d", Checksum: "sha256:bea7bf2e4bacd479344b737328db47b18880d09096e6674165533aa994f5e9f2", }, } repoRef, err := reference.ParseNamed(REPO) if err != nil { t.Fatal(err) } repoData, err := r.PushImageJSONIndex(repoRef, imgData, false, nil) if err != nil { t.Fatal(err) } if repoData == nil { t.Fatal("Expected RepositoryData object") } repoData, err = r.PushImageJSONIndex(repoRef, imgData, true, []string{r.indexEndpoint.String()}) if err != nil { t.Fatal(err) } if repoData == nil { t.Fatal("Expected RepositoryData object") } } func TestSearchRepositories(t *testing.T) { r := spawnTestRegistrySession(t) results, err := r.SearchRepositories("fakequery", 25) if err != nil { t.Fatal(err) } if results == nil { t.Fatal("Expected non-nil SearchResults object") } assertEqual(t, results.NumResults, 1, "Expected 1 search results") assertEqual(t, results.Query, "fakequery", "Expected 'fakequery' as query") assertEqual(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 
stars") } func TestTrustedLocation(t *testing.T) { for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { req, _ := http.NewRequest("GET", url, nil) if trustedLocation(req) == true { t.Fatalf("'%s' shouldn't be detected as a trusted location", url) } } for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} { req, _ := http.NewRequest("GET", url, nil) if trustedLocation(req) == false { t.Fatalf("'%s' should be detected as a trusted location", url) } } } func TestAddRequiredHeadersToRedirectedRequests(t *testing.T) { for _, urls := range [][]string{ {"http://docker.io", "https://docker.com"}, {"https://foo.docker.io:7777", "http://bar.docker.com"}, {"https://foo.docker.io", "https://example.com"}, } { reqFrom, _ := http.NewRequest("GET", urls[0], nil) reqFrom.Header.Add("Content-Type", "application/json") reqFrom.Header.Add("Authorization", "super_secret") reqTo, _ := http.NewRequest("GET", urls[1], nil) addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) if len(reqTo.Header) != 1 { t.Fatalf("Expected 1 headers, got %d", len(reqTo.Header)) } if reqTo.Header.Get("Content-Type") != "application/json" { t.Fatal("'Content-Type' should be 'application/json'") } if reqTo.Header.Get("Authorization") != "" { t.Fatal("'Authorization' should be empty") } } for _, urls := range [][]string{ {"https://docker.io", "https://docker.com"}, {"https://foo.docker.io:7777", "https://bar.docker.com"}, } { reqFrom, _ := http.NewRequest("GET", urls[0], nil) reqFrom.Header.Add("Content-Type", "application/json") reqFrom.Header.Add("Authorization", "super_secret") reqTo, _ := http.NewRequest("GET", urls[1], nil) addRequiredHeadersToRedirectedRequests(reqTo, []*http.Request{reqFrom}) if len(reqTo.Header) != 2 { t.Fatalf("Expected 2 headers, got %d", len(reqTo.Header)) } if reqTo.Header.Get("Content-Type") != "application/json" { t.Fatal("'Content-Type' should be 'application/json'") } if reqTo.Header.Get("Authorization") != "super_secret" { t.Fatal("'Authorization' should be 'super_secret'") } } } func TestIsSecureIndex(t *testing.T) { tests := []struct { addr string insecureRegistries []string expected bool }{ {IndexName, nil, true}, {"example.com", []string{}, true}, {"example.com", []string{"example.com"}, false}, {"localhost", []string{"localhost:5000"}, false}, {"localhost:5000", []string{"localhost:5000"}, false}, {"localhost", []string{"example.com"}, false}, {"127.0.0.1:5000", []string{"127.0.0.1:5000"}, false}, {"localhost", nil, false}, {"localhost:5000", nil, false}, {"127.0.0.1", nil, false}, {"localhost", []string{"example.com"}, false}, {"127.0.0.1", []string{"example.com"}, false}, {"example.com", nil, true}, {"example.com", []string{"example.com"}, false}, {"127.0.0.1", []string{"example.com"}, false}, {"127.0.0.1:5000", []string{"example.com"}, false}, {"example.com:5000", []string{"42.42.0.0/16"}, false}, {"example.com", []string{"42.42.0.0/16"}, false}, {"example.com:5000", []string{"42.42.42.42/8"}, false}, {"127.0.0.1:5000", []string{"127.0.0.0/8"}, false}, {"42.42.42.42:5000", []string{"42.1.1.1/8"}, false}, {"invalid.domain.com", []string{"42.42.0.0/16"}, true}, {"invalid.domain.com", []string{"invalid.domain.com"}, false}, {"invalid.domain.com:5000", []string{"invalid.domain.com"}, true}, {"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false}, } for _, tt := range tests { config := makeServiceConfig(nil, tt.insecureRegistries) if sec := 
isSecureIndex(config, tt.addr); sec != tt.expected { t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec) } } } type debugTransport struct { http.RoundTripper log func(...interface{}) } func (tr debugTransport) RoundTrip(req *http.Request) (*http.Response, error) { dump, err := httputil.DumpRequestOut(req, false) if err != nil { tr.log("could not dump request") } tr.log(string(dump)) resp, err := tr.RoundTripper.RoundTrip(req) if err != nil { return nil, err } dump, err = httputil.DumpResponse(resp, false) if err != nil { tr.log("could not dump response") } tr.log(string(dump)) return resp, err }<|fim▁end|>
<|file_name|>math27.py<|end_file_name|><|fim▁begin|>from sys import argv <|fim▁hole|><|fim▁end|>
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 """tbar - Terminal Bar Number to bar in terminal. """ __version__ = "0.1" import sys from tbar.tbar import TBar from tbar.reader import Reader def main(infile, comment, sep, field, regexp, max, length, vertical): infile = infile or sys.stdin r = Reader(infile=infile, comment=comment, sep=sep, field=field,<|fim▁hole|> b.add_data_itr(r.data) s = str(b) if s: print(s) else: print("No data.") return<|fim▁end|>
regexp=regexp) b = TBar(_max=max, length=length, vertical=vertical)
<|file_name|>suggestion.py<|end_file_name|><|fim▁begin|># coding: utf-8 # # Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Controllers for suggestions.""" from constants import constants from core.controllers import base from core.domain import acl_decorators from core.domain import suggestion_services from core.platform import models (suggestion_models,) = models.Registry.import_models([models.NAMES.suggestion]) class SuggestionHandler(base.BaseHandler): """"Handles operations relating to suggestions.""" @acl_decorators.can_suggest_changes def post(self): if not constants.USE_NEW_SUGGESTION_FRAMEWORK: raise self.PageNotFoundException suggestion_services.create_suggestion( self.payload.get('suggestion_type'), self.payload.get('target_type'), self.payload.get('target_id'), self.payload.get('target_version_at_submission'), self.user_id, self.payload.get('change_cmd'), self.payload.get('description'), self.payload.get('final_reviewer_id')) self.render_json(self.values) class SuggestionToExplorationActionHandler(base.BaseHandler): """Handles actions performed on suggestions to explorations.""" ACTION_TYPE_ACCEPT = 'accept' ACTION_TYPE_REJECT = 'reject' # TODO (nithesh): Add permissions for users with enough scores to review # Will be added as part of milestone 2 of the generalized review system # project. @acl_decorators.can_edit_exploration def put(self, exploration_id, suggestion_id): if not constants.USE_NEW_SUGGESTION_FRAMEWORK: raise self.PageNotFoundException if len(suggestion_id.split('.')) != 3: raise self.InvalidInputException('Invalid format for suggestion_id.' 
' It must contain 3 parts'<|fim▁hole|> ' on suggestions to explorations.') if suggestion_id.split('.')[1] != exploration_id: raise self.InvalidInputException('The exploration id provided does ' 'not match the exploration id ' 'present as part of the ' 'suggestion_id') action = self.payload.get('action') suggestion = suggestion_services.get_suggestion_by_id(suggestion_id) if action == self.ACTION_TYPE_ACCEPT: suggestion_services.accept_suggestion( suggestion, self.user_id, self.payload.get('commit_message'), self.payload.get('review_message')) elif action == self.ACTION_TYPE_REJECT: suggestion_services.reject_suggestion( suggestion, self.user_id, self.payload.get('review_message')) else: raise self.InvalidInputException('Invalid action.') self.render_json(self.values) class SuggestionListHandler(base.BaseHandler): """Handles list operations on suggestions.""" LIST_TYPE_AUTHOR = 'author' LIST_TYPE_ID = 'id' LIST_TYPE_REVIEWER = 'reviewer' LIST_TYPE_STATUS = 'status' LIST_TYPE_SUGGESTION_TYPE = 'type' LIST_TYPE_TARGET_ID = 'target' LIST_TYPES_TO_SERVICES_MAPPING = { LIST_TYPE_AUTHOR: suggestion_services.get_suggestions_by_author, LIST_TYPE_ID: suggestion_services.get_suggestion_by_id, LIST_TYPE_REVIEWER: suggestion_services.get_suggestions_reviewed_by, LIST_TYPE_STATUS: suggestion_services.get_suggestions_by_status, LIST_TYPE_SUGGESTION_TYPE: suggestion_services.get_suggestion_by_type, LIST_TYPE_TARGET_ID: suggestion_services.get_suggestions_by_target_id } PARAMS_FOR_LIST_TYPES = { LIST_TYPE_AUTHOR: ['author_id'], LIST_TYPE_ID: ['suggestion_id'], LIST_TYPE_REVIEWER: ['reviewer_id'], LIST_TYPE_STATUS: ['status'], LIST_TYPE_SUGGESTION_TYPE: ['suggestion_type'], LIST_TYPE_TARGET_ID: ['target_type', 'target_id'] } def get_params_from_request(self, request, list_type): return [request.get(param_name) for param_name in self.PARAMS_FOR_LIST_TYPES[list_type]] @acl_decorators.open_access def get(self): if not constants.USE_NEW_SUGGESTION_FRAMEWORK: raise self.PageNotFoundException list_type = self.request.get('list_type') if list_type not in self.LIST_TYPES_TO_SERVICES_MAPPING: raise self.InvalidInputException('Invalid list type.') params = self.get_params_from_request(self.request, list_type) suggestions = self.LIST_TYPES_TO_SERVICES_MAPPING[list_type](*params) # When querying by ID, only a single suggestion is retrieved, so we make # it a list. if list_type == self.LIST_TYPE_ID: suggestions = [suggestions] self.values.update({'suggestions': [s.to_dict() for s in suggestions]}) self.render_json(self.values)<|fim▁end|>
' separated by \'.\'') if suggestion_id.split('.')[0] != 'exploration': raise self.InvalidInputException('This handler allows actions only'
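At the HTTP level, the two handlers above could be driven as in the sketch below. The routes are placeholders (the real ones are registered elsewhere in Oppia and are not shown here), both handlers 404 unless constants.USE_NEW_SUGGESTION_FRAMEWORK is enabled, and the request/response handling is simplified: a real Oppia client wraps the JSON body in a payload with a CSRF token and responses carry an XSSI prefix, both of which this sketch omits.

import requests

BASE = 'https://oppia.example'  # placeholder host; real routes live in Oppia's route registry

# SuggestionListHandler: each list_type requires the query params named in
# PARAMS_FOR_LIST_TYPES, e.g. 'target' needs target_type and target_id.
resp = requests.get(BASE + '/suggestionlisthandler', params={
    'list_type': 'target',
    'target_type': 'exploration',
    'target_id': 'exp_1',
})
for suggestion in resp.json()['suggestions']:
    print(suggestion)

# SuggestionToExplorationActionHandler: per the validation in put() above,
# the suggestion_id must look like 'exploration.<exploration_id>.<thread_id>'
# and its middle part must match the exploration_id in the URL.
requests.put(
    BASE + '/suggestionactionhandler/exp_1/exploration.exp_1.thread_1',
    json={'action': 'accept',
          'commit_message': 'Apply suggested edit',
          'review_message': 'Looks good, thanks!'})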
<|file_name|>bitcoin_tr.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="tr" version="2.0"> <defaultcodec>UTF-8</defaultcodec> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About Litecoin</source> <translation>Licoin hakkında</translation> </message> <message> <location line="+39"/> <source>&lt;b&gt;Litecoin&lt;/b&gt; version</source> <translation>&lt;b&gt;Licoin&lt;/b&gt; sürüm</translation> </message> <message> <location line="+57"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source> <translation> Bu yazılım deneme safhasındadır. MIT/X11 yazılım lisansı kapsamında yayınlanmıştır, COPYING dosyasına ya da http://www.opensource.org/licenses/mit-license.php sayfasına bakınız. Bu ürün OpenSSL projesi tarafından OpenSSL araç takımı (http://www.openssl.org/) için geliştirilen yazılımlar, Eric Young ([email protected]) tarafından hazırlanmış şifreleme yazılımları ve Thomas Bernard tarafından programlanmış UPnP yazılımı içerir.</translation> </message> <message> <location filename="../aboutdialog.cpp" line="+14"/> <source>Copyright</source> <translation>Telif hakkı</translation> </message> <message> <location line="+0"/> <source>The Litecoin developers</source> <translation>Licoin geliştiricileri</translation> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation>Adres defteri</translation> </message> <message> <location line="+19"/> <source>Double-click to edit address or label</source> <translation>Adresi ya da etiketi düzenlemek için çift tıklayınız</translation> </message> <message> <location line="+27"/> <source>Create a new address</source> <translation>Yeni bir adres oluştur</translation> </message> <message> <location line="+14"/> <source>Copy the currently selected address to the system clipboard</source> <translation>Şu anda seçili olan adresi panoya kopyala</translation> </message> <message> <location line="-11"/> <source>&amp;New Address</source> <translation>&amp;Yeni adres</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+63"/> <source>These are your Litecoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation>Bunlar, ödeme almak için Licoin adresleridir. 
Kimin ödeme yaptığını izleyebilmek için her ödeme yollaması gereken kişiye değişik bir adres verebilirsiniz.</translation> </message> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>&amp;Copy Address</source> <translation>Adresi &amp;kopyala</translation> </message> <message> <location line="+11"/> <source>Show &amp;QR Code</source> <translation>&amp;QR kodunu göster</translation> </message> <message> <location line="+11"/> <source>Sign a message to prove you own a Litecoin address</source> <translation>Bir Licoin adresinin sizin olduğunu ispatlamak için mesaj imzalayın</translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation>&amp;Mesaj imzala</translation> </message> <message> <location line="+25"/> <source>Delete the currently selected address from the list</source> <translation>Seçili adresi listeden sil</translation> </message> <message> <location line="+27"/> <source>Export the data in the current tab to a file</source> <translation>Güncel sekmedeki verileri bir dosyaya aktar</translation> </message> <message> <location line="+3"/> <source>&amp;Export</source> <translation>&amp;Dışa aktar</translation> </message> <message> <location line="-44"/> <source>Verify a message to ensure it was signed with a specified Litecoin address</source> <translation>Belirtilen Licoin adresi ile imzalandığını doğrulamak için bir mesajı kontrol et</translation> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation>Mesaj &amp;kontrol et</translation> </message> <message> <location line="+14"/> <source>&amp;Delete</source> <translation>&amp;Sil</translation> </message> <message> <location filename="../addressbookpage.cpp" line="-5"/> <source>These are your Litecoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source> <translation>Bunlar ödeme yapmak için kullanacağınız Licoin adreslerinizdir. 
Licoin yollamadan önce meblağı ve alıcı adresini daima kontrol ediniz.</translation> </message> <message> <location line="+13"/> <source>Copy &amp;Label</source> <translation>&amp;Etiketi kopyala</translation> </message> <message> <location line="+1"/> <source>&amp;Edit</source> <translation>&amp;Düzenle</translation> </message> <message> <location line="+1"/> <source>Send &amp;Coins</source> <translation>Bit&amp;coin Gönder</translation> </message> <message> <location line="+260"/> <source>Export Address Book Data</source> <translation>Adres defteri verilerini dışa aktar</translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Virgülle ayrılmış değerler dosyası (*.csv)</translation> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation>Dışa aktarımda hata oluştu</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>%1 dosyasına yazılamadı.</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+144"/> <source>Label</source> <translation>Etiket</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Adres</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(boş etiket)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation>Parola diyaloğu</translation> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>Parolayı giriniz</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>Yeni parola</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>Yeni parolayı tekrarlayınız</translation> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+33"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;10 or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>Cüzdanınız için yeni parolayı giriniz.&lt;br/&gt;Lütfen &lt;b&gt;10 ya da daha fazla rastgele karakter&lt;/b&gt; veya &lt;b&gt;sekiz ya da daha fazla kelime&lt;/b&gt; içeren bir parola seçiniz.</translation> </message> <message> <location line="+1"/> <source>Encrypt wallet</source> <translation>Cüzdanı şifrele</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>Bu işlem cüzdan kilidini açmak için cüzdan parolanızı gerektirir.</translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation>Cüzdan kilidini aç</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>Bu işlem, cüzdan şifresini açmak için cüzdan parolasını gerektirir.</translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation>Cüzdan şifresini aç</translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation>Parolayı değiştir</translation> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> 
<translation>Cüzdan için eski ve yeni parolaları giriniz.</translation> </message> <message> <location line="+46"/> <source>Confirm wallet encryption</source> <translation>Cüzdan şifrelenmesini teyit eder</translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR LITECOINS&lt;/b&gt;!</source> <translation>Uyarı: Eğer cüzdanınızı şifrelerseniz ve parolanızı kaybederseniz, &lt;b&gt;TÜM BİTCOİNLERİNİZİ KAYBEDERSİNİZ&lt;/b&gt;!</translation> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation>Cüzdanınızı şifrelemek istediğinizden emin misiniz?</translation> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>ÖNEMLİ: Önceden yapmış olduğunuz cüzdan dosyası yedeklemelerinin yeni oluşturulan şifrelenmiş cüzdan dosyası ile değiştirilmeleri gerekir. Güvenlik nedenleriyle yeni, şifrelenmiş cüzdanı kullanmaya başladığınızda eski şifrelenmemiş cüzdan dosyaları işe yaramaz hale gelecektir.</translation> </message> <message> <location line="+100"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation>Uyarı: Caps Lock tuşu faal durumda!</translation> </message> <message> <location line="-130"/> <location line="+58"/> <source>Wallet encrypted</source> <translation>Cüzdan şifrelendi</translation> </message> <message> <location line="-56"/> <source>Litecoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your litecoins from being stolen by malware infecting your computer.</source> <translation>Şifreleme işlemini tamamlamak için Licoin şimdi kapanacaktır. Cüzdanınızı şifrelemenin, Licoinlerinizin bilgisayara bulaşan kötücül bir yazılım tarafından çalınmaya karşı tamamen koruyamayacağını unutmayınız.</translation> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+42"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation>Cüzdan şifrelemesi başarısız oldu</translation> </message> <message> <location line="-54"/> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>Dahili bir hata sebebiyle cüzdan şifrelemesi başarısız oldu. 
Cüzdanınız şifrelenmedi.</translation> </message> <message> <location line="+7"/> <location line="+48"/> <source>The supplied passphrases do not match.</source><|fim▁hole|> <message> <location line="-37"/> <source>Wallet unlock failed</source> <translation>Cüzdan kilidinin açılması başarısız oldu</translation> </message> <message> <location line="+1"/> <location line="+11"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>Cüzdan şifresinin açılması için girilen parola yanlıştı.</translation> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation>Cüzdan şifresinin açılması başarısız oldu</translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation>Cüzdan parolası başarılı bir şekilde değiştirildi.</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+233"/> <source>Sign &amp;message...</source> <translation>&amp;Mesaj imzala...</translation> </message> <message> <location line="+280"/> <source>Synchronizing with network...</source> <translation>Şebeke ile senkronizasyon...</translation> </message> <message> <location line="-349"/> <source>&amp;Overview</source> <translation>&amp;Genel bakış</translation> </message> <message> <location line="+1"/> <source>Show general overview of wallet</source> <translation>Cüzdana genel bakışı göster</translation> </message> <message> <location line="+20"/> <source>&amp;Transactions</source> <translation>&amp;Muameleler</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation>Muamele tarihçesini tara</translation> </message> <message> <location line="+7"/> <source>Edit the list of stored addresses and labels</source> <translation>Saklanan adres ve etiket listesini düzenle</translation> </message> <message> <location line="-14"/> <source>Show the list of addresses for receiving payments</source> <translation>Ödeme alma adreslerinin listesini göster</translation> </message> <message> <location line="+31"/> <source>E&amp;xit</source> <translation>&amp;Çık</translation> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>Uygulamadan çık</translation> </message> <message> <location line="+4"/> <source>Show information about Litecoin</source> <translation>Licoin hakkında bilgi göster</translation> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation>&amp;Qt hakkında</translation> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation>Qt hakkında bilgi görüntü</translation> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation>&amp;Seçenekler...</translation> </message> <message> <location line="+6"/> <source>&amp;Encrypt Wallet...</source> <translation>Cüzdanı &amp;şifrele...</translation> </message> <message> <location line="+3"/> <source>&amp;Backup Wallet...</source> <translation>Cüzdanı &amp;yedekle...</translation> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation>Parolayı &amp;değiştir...</translation> </message> <message> <location line="+285"/> <source>Importing blocks from disk...</source> <translation>Bloklar diskten içe aktarılıyor...</translation> </message> <message> <location line="+3"/> <source>Reindexing blocks on disk...</source> 
<translation>Diskteki bloklar yeniden endeksleniyor...</translation> </message> <message> <location line="-347"/> <source>Send coins to a Litecoin address</source> <translation>Bir Licoin adresine Licoin yolla</translation> </message> <message> <location line="+49"/> <source>Modify configuration options for Litecoin</source> <translation>Licoin seçeneklerinin yapılandırmasını değiştir</translation> </message> <message> <location line="+9"/> <source>Backup wallet to another location</source> <translation>Cüzdanı diğer bir konumda yedekle</translation> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>Cüzdan şifrelemesi için kullanılan parolayı değiştir</translation> </message> <message> <location line="+6"/> <source>&amp;Debug window</source> <translation>&amp;Hata ayıklama penceresi</translation> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation>Hata ayıklama ve teşhis penceresini aç</translation> </message> <message> <location line="-4"/> <source>&amp;Verify message...</source> <translation>Mesaj &amp;kontrol et...</translation> </message> <message> <location line="-165"/> <location line="+530"/> <source>Litecoin</source> <translation>Licoin</translation> </message> <message> <location line="-530"/> <source>Wallet</source> <translation>Cüzdan</translation> </message> <message> <location line="+101"/> <source>&amp;Send</source> <translation>&amp;Gönder</translation> </message> <message> <location line="+7"/> <source>&amp;Receive</source> <translation>&amp;Al</translation> </message> <message> <location line="+14"/> <source>&amp;Addresses</source> <translation>&amp;Adresler</translation> </message> <message> <location line="+22"/> <source>&amp;About Litecoin</source> <translation>Licoin &amp;Hakkında</translation> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation>&amp;Göster / Sakla</translation> </message> <message> <location line="+1"/> <source>Show or hide the main Window</source> <translation>Ana pencereyi görüntüle ya da sakla</translation> </message> <message> <location line="+3"/> <source>Encrypt the private keys that belong to your wallet</source> <translation>Cüzdanınızın özel anahtarlarını şifrele</translation> </message> <message> <location line="+7"/> <source>Sign messages with your Litecoin addresses to prove you own them</source> <translation>Mesajları adreslerin size ait olduğunu ispatlamak için Licoin adresleri ile imzala</translation> </message> <message> <location line="+2"/> <source>Verify messages to ensure they were signed with specified Litecoin addresses</source> <translation>Belirtilen Licoin adresleri ile imzalandıklarından emin olmak için mesajları kontrol et</translation> </message> <message> <location line="+28"/> <source>&amp;File</source> <translation>&amp;Dosya</translation> </message> <message> <location line="+7"/> <source>&amp;Settings</source> <translation>&amp;Ayarlar</translation> </message> <message> <location line="+6"/> <source>&amp;Help</source> <translation>&amp;Yardım</translation> </message> <message> <location line="+9"/> <source>Tabs toolbar</source> <translation>Sekme araç çubuğu</translation> </message> <message> <location line="+17"/> <location line="+10"/> <source>[testnet]</source> <translation>[testnet]</translation> </message> <message> <location line="+47"/> <source>Litecoin client</source> <translation>Licoin istemcisi</translation> </message> <message numerus="yes"> 
<location line="+141"/> <source>%n active connection(s) to Litecoin network</source> <translation><numerusform>Licoin şebekesine %n faal bağlantı</numerusform><numerusform>Licoin şebekesine %n faal bağlantı</numerusform></translation> </message> <message> <location line="+22"/> <source>No block source available...</source> <translation>Hiçbir blok kaynağı mevcut değil...</translation> </message> <message> <location line="+12"/> <source>Processed %1 of %2 (estimated) blocks of transaction history.</source> <translation>Muamele tarihçesinin toplam (tahmini) %2 blokundan %1 blok işlendi.</translation> </message> <message> <location line="+4"/> <source>Processed %1 blocks of transaction history.</source> <translation>Muamele tarihçesinde %1 blok işlendi.</translation> </message> <message numerus="yes"> <location line="+20"/> <source>%n hour(s)</source> <translation><numerusform>%n saat</numerusform><numerusform>%n saat</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s)</source> <translation><numerusform>%n gün</numerusform><numerusform>%n gün</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n week(s)</source> <translation><numerusform>%n hafta</numerusform><numerusform>%n hafta</numerusform></translation> </message> <message> <location line="+4"/> <source>%1 behind</source> <translation>%1 geride</translation> </message> <message> <location line="+14"/> <source>Last received block was generated %1 ago.</source> <translation>Son alınan blok %1 evvel oluşturulmuştu.</translation> </message> <message> <location line="+2"/> <source>Transactions after this will not yet be visible.</source> <translation>Bundan sonraki muameleler henüz görüntülenemez.</translation> </message> <message> <location line="+22"/> <source>Error</source> <translation>Hata</translation> </message> <message> <location line="+3"/> <source>Warning</source> <translation>Uyarı</translation> </message> <message> <location line="+3"/> <source>Information</source> <translation>Bilgi</translation> </message> <message> <location line="+70"/> <source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source> <translation>Bu muamele boyut sınırlarını aşmıştır. Gene de %1 ücret ödeyerek gönderebilirsiniz, ki bu ücret muamelenizi işleyen ve şebekeye yardım eden düğümlere ödenecektir. 
Ücreti ödemek istiyor musunuz?</translation> </message> <message> <location line="-140"/> <source>Up to date</source> <translation>Güncel</translation> </message> <message> <location line="+31"/> <source>Catching up...</source> <translation>Aralık kapatılıyor...</translation> </message> <message> <location line="+113"/> <source>Confirm transaction fee</source> <translation>Muamele ücretini teyit et</translation> </message> <message> <location line="+8"/> <source>Sent transaction</source> <translation>Muamele yollandı</translation> </message> <message> <location line="+0"/> <source>Incoming transaction</source> <translation>Gelen muamele</translation> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation>Tarih: %1 Miktar: %2 Tür: %3 Adres: %4 </translation> </message> <message> <location line="+33"/> <location line="+23"/> <source>URI handling</source> <translation>URI yönetimi</translation> </message> <message> <location line="-23"/> <location line="+23"/> <source>URI can not be parsed! This can be caused by an invalid Litecoin address or malformed URI parameters.</source> <translation>URI okunamadı! Sebebi geçersiz bir Licoin adresi veya hatalı URI parametreleri olabilir.</translation> </message> <message> <location line="+17"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>Cüzdan &lt;b&gt;şifrelenmiştir&lt;/b&gt; ve şu anda &lt;b&gt;kilidi açıktır&lt;/b&gt;</translation> </message> <message> <location line="+8"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>Cüzdan &lt;b&gt;şifrelenmiştir&lt;/b&gt; ve şu anda &lt;b&gt;kilitlidir&lt;/b&gt;</translation> </message> <message> <location filename="../bitcoin.cpp" line="+111"/> <source>A fatal error occurred. Litecoin can no longer continue safely and will quit.</source> <translation>Ciddi bir hata oluştu. Licoin artık güvenli bir şekilde işlemeye devam edemez ve kapanacaktır.</translation> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+104"/> <source>Network Alert</source> <translation>Şebeke hakkında uyarı</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>Adresi düzenle</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>&amp;Etiket</translation> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation>Bu adres defteri unsuru ile ilişkili etiket</translation> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>&amp;Adres</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. This can only be modified for sending addresses.</source> <translation>Bu adres defteri unsuru ile ilişkili adres. 
Bu, sadece gönderi adresi için değiştirilebilir.</translation> </message> <message> <location filename="../editaddressdialog.cpp" line="+21"/> <source>New receiving address</source> <translation>Yeni alım adresi</translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation>Yeni gönderi adresi</translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation>Alım adresini düzenle</translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation>Gönderi adresini düzenle</translation> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>Girilen &quot;%1&quot; adresi hâlihazırda adres defterinde mevcuttur.</translation> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid Litecoin address.</source> <translation>Girilen &quot;%1&quot; adresi geçerli bir Licoin adresi değildir.</translation> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation>Cüzdan kilidi açılamadı.</translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation>Yeni anahtar oluşturulması başarısız oldu.</translation> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+424"/> <location line="+12"/> <source>Litecoin-Qt</source> <translation>Licoin-Qt</translation> </message> <message> <location line="-12"/> <source>version</source> <translation>sürüm</translation> </message> <message> <location line="+2"/> <source>Usage:</source> <translation>Kullanım:</translation> </message> <message> <location line="+1"/> <source>command-line options</source> <translation>komut satırı seçenekleri</translation> </message> <message> <location line="+4"/> <source>UI options</source> <translation>Kullanıcı arayüzü seçenekleri</translation> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation>Lisan belirt, mesela &quot;de_De&quot; (varsayılan: sistem dili)</translation> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation>Küçültülmüş olarak başlat</translation> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation>Başlatıldığında başlangıç ekranını göster (varsayılan: 1)</translation> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>Seçenekler</translation> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation>&amp;Esas ayarlar</translation> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source> <translation>Muamelelerin hızlı işlenmesini garantilemeye yardım eden, seçime dayalı kB başı muamele ücreti. 
Muamelelerin çoğunluğunun boyutu 1 kB&apos;dir.</translation> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation>Muamele ücreti &amp;öde</translation> </message> <message> <location line="+31"/> <source>Automatically start Litecoin after logging in to the system.</source> <translation>Sistemde oturum açıldığında Licoin&apos;i otomatik olarak başlat.</translation> </message> <message> <location line="+3"/> <source>&amp;Start Litecoin on system login</source> <translation>Licoin&apos;i sistem oturumuyla &amp;başlat</translation> </message> <message> <location line="+35"/> <source>Reset all client options to default.</source> <translation>İstemcinin tüm seçeneklerini varsayılan değerlere geri al.</translation> </message> <message> <location line="+3"/> <source>&amp;Reset Options</source> <translation>Seçenekleri &amp;sıfırla</translation> </message> <message> <location line="+13"/> <source>&amp;Network</source> <translation>&amp;Şebeke</translation> </message> <message> <location line="+6"/> <source>Automatically open the Litecoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation>Yönlendiricide Licoin istemci portlarını otomatik olarak açar. Bu, sadece yönlendiricinizin UPnP desteği bulunuyorsa ve etkinse çalışabilir.</translation> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation>Portları &amp;UPnP kullanarak haritala</translation> </message> <message> <location line="+7"/> <source>Connect to the Litecoin network through a SOCKS proxy (e.g. when connecting through Tor).</source> <translation>Licoin şebekesine SOCKS vekil sunucusu vasıtasıyla bağlan (mesela Tor ile bağlanıldığında).</translation> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy:</source> <translation>SOCKS vekil sunucusu vasıtasıyla ba&amp;ğlan:</translation> </message> <message> <location line="+9"/> <source>Proxy &amp;IP:</source> <translation>Vekil &amp;İP:</translation> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation>Vekil sunucunun İP adresi (mesela 127.0.0.1)</translation> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation>&amp;Port:</translation> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation>Vekil sunucunun portu (mesela 9050)</translation> </message> <message> <location line="+7"/> <source>SOCKS &amp;Version:</source> <translation>SOCKS &amp;sürümü:</translation> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation>Vekil sunucunun SOCKS sürümü (mesela 5)</translation> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation>&amp;Pencere</translation> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation>Küçültüldükten sonra sadece çekmece ikonu göster.</translation> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>İşlem çubuğu yerine sistem çekmecesine &amp;küçült</translation> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. 
When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation>Pencere kapatıldığında uygulamadan çıkmak yerine uygulamayı küçültür. Bu seçenek etkinleştirildiğinde, uygulama sadece menüden çıkış seçildiğinde kapanacaktır.</translation> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation>Kapatma sırasında k&amp;üçült</translation> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation>&amp;Görünüm</translation> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation>Kullanıcı arayüzü &amp;lisanı:</translation> </message> <message> <location line="+13"/> <source>The user interface language can be set here. This setting will take effect after restarting Litecoin.</source> <translation>Kullanıcı arayüzünün dili burada belirtilebilir. Bu ayar Licoin tekrar başlatıldığında etkinleşecektir.</translation> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation>Miktarı göstermek için &amp;birim:</translation> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Licoin gönderildiğinde arayüzde gösterilecek varsayılan alt birimi seçiniz.</translation> </message> <message> <location line="+9"/> <source>Whether to show Litecoin addresses in the transaction list or not.</source> <translation>Muamele listesinde Licoin adreslerinin gösterilip gösterilmeyeceklerini belirler.</translation> </message> <message> <location line="+3"/> <source>&amp;Display addresses in transaction list</source> <translation>Muamele listesinde adresleri &amp;göster</translation> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation>&amp;Tamam</translation> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation>&amp;İptal</translation> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation>&amp;Uygula</translation> </message> <message> <location filename="../optionsdialog.cpp" line="+53"/> <source>default</source> <translation>varsayılan</translation> </message> <message> <location line="+130"/> <source>Confirm options reset</source> <translation>Seçeneklerin sıfırlanmasını teyit et</translation> </message> <message> <location line="+1"/> <source>Some settings may require a client restart to take effect.</source> <translation>Bazı ayarların dikkate alınması istemcinin tekrar başlatılmasını gerektirebilir.</translation> </message> <message> <location line="+0"/> <source>Do you want to proceed?</source> <translation>Devam etmek istiyor musunuz?</translation> </message> <message> <location line="+42"/> <location line="+9"/> <source>Warning</source> <translation>Uyarı</translation> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting Litecoin.</source> <translation>Bu ayarlar Licoin tekrar başlatıldığında etkinleşecektir.</translation> </message> <message> <location line="+29"/> <source>The supplied proxy address is invalid.</source> <translation>Girilen vekil sunucu adresi geçersizdir.</translation> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation>Form</translation> </message> <message> <location line="+50"/> <location line="+166"/> 
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Litecoin network after a connection is established, but this process has not completed yet.</source> <translation>Görüntülenen veriler zaman aşımına uğramış olabilir. Bağlantı kurulduğunda cüzdanınız otomatik olarak şebeke ile eşleşir ancak bu işlem henüz tamamlanmamıştır.</translation> </message> <message> <location line="-124"/> <source>Balance:</source> <translation>Bakiye:</translation> </message> <message> <location line="+29"/> <source>Unconfirmed:</source> <translation>Doğrulanmamış:</translation> </message> <message> <location line="-78"/> <source>Wallet</source> <translation>Cüzdan</translation> </message> <message> <location line="+107"/> <source>Immature:</source> <translation>Olgunlaşmamış:</translation> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation>Oluşturulan bakiye henüz olgunlaşmamıştır</translation> </message> <message> <location line="+46"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;Son muameleler&lt;/b&gt;</translation> </message> <message> <location line="-101"/> <source>Your current balance</source> <translation>Güncel bakiyeniz</translation> </message> <message> <location line="+29"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation>Doğrulanması beklenen ve henüz güncel bakiyeye ilâve edilmemiş muamelelerin toplamı</translation> </message> <message> <location filename="../overviewpage.cpp" line="+116"/> <location line="+1"/> <source>out of sync</source> <translation>eşleşme dışı</translation> </message> </context> <context> <name>PaymentServer</name> <message> <location filename="../paymentserver.cpp" line="+107"/> <source>Cannot start litecoin: click-to-pay handler</source> <translation>Licoin başlatılamadı: tıkla-ve-öde yöneticisi</translation> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation>QR kodu diyaloğu</translation> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation>Ödeme talebi</translation> </message> <message> <location line="+56"/> <source>Amount:</source> <translation>Miktar:</translation> </message> <message> <location line="-44"/> <source>Label:</source> <translation>Etiket:</translation> </message> <message> <location line="+19"/> <source>Message:</source> <translation>Mesaj:</translation> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation>&amp;Farklı kaydet...</translation> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation>URI&apos;nin QR koduna kodlanmasında hata oluştu.</translation> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation>Girilen miktar geçersizdir, kontrol ediniz.</translation> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation>Sonuç URI çok uzun, etiket ya da mesaj metnini kısaltmayı deneyiniz.</translation> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation>QR kodu kaydet</translation> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> 
<translation>PNG resimleri (*.png)</translation> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation>İstemci ismi</translation> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <location line="+23"/> <location filename="../rpcconsole.cpp" line="+339"/> <source>N/A</source> <translation>Mevcut değil</translation> </message> <message> <location line="-217"/> <source>Client version</source> <translation>İstemci sürümü</translation> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation>&amp;Malumat</translation> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation>Kullanılan OpenSSL sürümü</translation> </message> <message> <location line="+49"/> <source>Startup time</source> <translation>Başlama zamanı</translation> </message> <message> <location line="+29"/> <source>Network</source> <translation>Şebeke</translation> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation>Bağlantı sayısı</translation> </message> <message> <location line="+23"/> <source>On testnet</source> <translation>Testnet üzerinde</translation> </message> <message> <location line="+23"/> <source>Block chain</source> <translation>Blok zinciri</translation> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation>Güncel blok sayısı</translation> </message> <message> <location line="+23"/> <source>Estimated total blocks</source> <translation>Tahmini toplam blok sayısı</translation> </message> <message> <location line="+23"/> <source>Last block time</source> <translation>Son blok zamanı</translation> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation>&amp;Aç</translation> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation>Komut satırı seçenekleri</translation> </message> <message> <location line="+7"/> <source>Show the Litecoin-Qt help message to get a list with possible Litecoin command-line options.</source> <translation>Mevcut Licoin komut satırı seçeneklerinin listesini içeren Licoin-Qt yardımını göster.</translation> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation>&amp;Göster</translation> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation>&amp;Konsol</translation> </message> <message> <location line="-260"/> <source>Build date</source> <translation>Derleme tarihi</translation> </message> <message> <location line="-104"/> <source>Litecoin - Debug window</source> <translation>Licoin - Hata ayıklama penceresi</translation> </message> <message> <location line="+25"/> <source>Litecoin Core</source> <translation>Licoin Çekirdeği</translation> </message> <message> <location line="+279"/> <source>Debug log file</source> <translation>Hata ayıklama kütük dosyası</translation> </message> <message> <location line="+7"/> <source>Open the Litecoin debug log file from the current data directory. This can take a few seconds for large log files.</source> <translation>Güncel veri klasöründen Licoin hata ayıklama kütük dosyasını açar. 
Büyük kütük dosyaları için bu birkaç saniye alabilir.</translation> </message> <message> <location line="+102"/> <source>Clear console</source> <translation>Konsolu temizle</translation> </message> <message> <location filename="../rpcconsole.cpp" line="-30"/> <source>Welcome to the Litecoin RPC console.</source> <translation>Licoin RPC konsoluna hoş geldiniz.</translation> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation>Tarihçede gezinmek için imleç tuşlarını kullanınız, &lt;b&gt;Ctrl-L&lt;/b&gt; ile de ekranı temizleyebilirsiniz.</translation> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation>Mevcut komutların listesi için &lt;b&gt;help&lt;/b&gt; yazınız.</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+124"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation>Licoin yolla</translation> </message> <message> <location line="+50"/> <source>Send to multiple recipients at once</source> <translation>Birçok alıcıya aynı anda gönder</translation> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation>&amp;Alıcı ekle</translation> </message> <message> <location line="+20"/> <source>Remove all transaction fields</source> <translation>Bütün muamele alanlarını kaldır</translation> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation>Tümünü &amp;temizle</translation> </message> <message> <location line="+22"/> <source>Balance:</source> <translation>Bakiye:</translation> </message> <message> <location line="+10"/> <source>123.456 BTC</source> <translation>123.456 BTC</translation> </message> <message> <location line="+31"/> <source>Confirm the send action</source> <translation>Yollama etkinliğini teyit ediniz</translation> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation>G&amp;önder</translation> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-59"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation>&lt;b&gt;%1&lt;/b&gt; şu adrese: %2 (%3)</translation> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation>Gönderiyi teyit ediniz</translation> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation>%1 göndermek istediğinizden emin misiniz?</translation> </message> <message> <location line="+0"/> <source> and </source> <translation> ve </translation> </message> <message> <location line="+23"/> <source>The recipient address is not valid, please recheck.</source> <translation>Alıcı adresi geçerli değildir, lütfen denetleyiniz.</translation> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation>Ödeyeceğiniz tutarın sıfırdan yüksek olması gerekir.</translation> </message> <message> <location line="+5"/> <source>The amount exceeds your balance.</source> <translation>Tutar bakiyenizden yüksektir.</translation> </message> <message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> 
<translation>Toplam, %1 muamele ücreti ilâve edildiğinde bakiyenizi geçmektedir.</translation> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation>Çift adres bulundu, belli bir gönderi sırasında her adrese sadece tek bir gönderide bulunulabilir.</translation> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed!</source> <translation>Hata: Muamele oluşturması başarısız oldu!</translation> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation>Hata: Muamele reddedildi. Cüzdanınızdaki madenî paraların bazıları zaten harcanmış olduğunda bu meydana gelebilir. Örneğin wallet.dat dosyasının bir kopyasını kullandıysanız ve kopyada para harcandığında ancak burada harcandığı işaretlenmediğinde.</translation> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation>Form</translation> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation>M&amp;iktar:</translation> </message> <message> <location line="+13"/> <source>Pay &amp;To:</source> <translation>&amp;Şu kişiye öde:</translation> </message> <message> <location line="+34"/> <source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation>Ödemenin gönderileceği adres (mesela Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation> </message> <message> <location line="+60"/> <location filename="../sendcoinsentry.cpp" line="+26"/> <source>Enter a label for this address to add it to your address book</source> <translation>Adres defterinize eklemek için bu adrese ilişik bir etiket giriniz</translation> </message> <message> <location line="-78"/> <source>&amp;Label:</source> <translation>&amp;Etiket:</translation> </message> <message> <location line="+28"/> <source>Choose address from address book</source> <translation>Adres defterinden adres seç</translation> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation>Panodan adres yapıştır</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation>Bu alıcıyı kaldır</translation> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a Litecoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation>Licoin adresi giriniz (mesela Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation>İmzalar - Mesaj İmzala / Kontrol et</translation> </message> <message> <location line="+13"/> <source>&amp;Sign Message</source> <translation>Mesaj &amp;imzala</translation> </message> <message> <location line="+6"/> <source>You can sign messages with your addresses to prove you own them. 
Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation>Bir adresin sizin olduğunu ispatlamak için adresinizle mesaj imzalayabilirsiniz. Oltalama saldırılarının kimliğinizi imzanızla elde etmeyi deneyebilecekleri için belirsiz hiçbir şey imzalamamaya dikkat ediniz. Sadece ayrıntılı açıklaması olan ve tümüne katıldığınız ifadeleri imzalayınız.</translation> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation>Mesajın imzalanmasında kullanılacak adres (mesela Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation> </message> <message> <location line="+10"/> <location line="+213"/> <source>Choose an address from the address book</source> <translation>Adres defterinden bir adres seç</translation> </message> <message> <location line="-203"/> <location line="+213"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="-203"/> <source>Paste address from clipboard</source> <translation>Panodan adres yapıştır</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation>İmzalamak istediğiniz mesajı burada giriniz</translation> </message> <message> <location line="+7"/> <source>Signature</source> <translation>İmza</translation> </message> <message> <location line="+27"/> <source>Copy the current signature to the system clipboard</source> <translation>Güncel imzayı sistem panosuna kopyala</translation> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this Litecoin address</source> <translation>Bu Licoin adresinin sizin olduğunu ispatlamak için mesajı imzalayın</translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation>&amp;Mesajı imzala</translation> </message> <message> <location line="+14"/> <source>Reset all sign message fields</source> <translation>Tüm mesaj alanlarını sıfırla</translation> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation>Tümünü &amp;temizle</translation> </message> <message> <location line="-87"/> <source>&amp;Verify Message</source> <translation>Mesaj &amp;kontrol et</translation> </message> <message> <location line="+6"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation>İmza için kullanılan adresi, mesajı (satır sonları, boşluklar, sekmeler vs. karakterleri tam olarak kopyaladığınızdan emin olunuz) ve imzayı aşağıda giriniz. Bir ortadaki adam saldırısı tarafından kandırılmaya mâni olmak için imzadan, imzalı mesajın içeriğini aşan bir anlam çıkarmamaya dikkat ediniz.</translation> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. 
Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation>Mesajı imzalamak için kullanılmış olan adres (mesela Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified Litecoin address</source> <translation>Belirtilen Licoin adresi ile imzalandığını doğrulamak için mesajı kontrol et</translation> </message> <message> <location line="+3"/> <source>Verify &amp;Message</source> <translation>&amp;Mesaj kontrol et</translation> </message> <message> <location line="+14"/> <source>Reset all verify message fields</source> <translation>Tüm mesaj kontrolü alanlarını sıfırla</translation> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a Litecoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation>Licoin adresi giriniz (mesela Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation>İmzayı oluşturmak için &quot;Mesaj İmzala&quot; unsurunu tıklayın</translation> </message> <message> <location line="+3"/> <source>Enter Litecoin signature</source> <translation>Licoin imzası gir</translation> </message> <message> <location line="+82"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation>Girilen adres geçersizdir.</translation> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation>Adresi kontrol edip tekrar deneyiniz.</translation> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation>Girilen adres herhangi bir anahtara işaret etmemektedir.</translation> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation>Cüzdan kilidinin açılması iptal edildi.</translation> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation>Girilen adres için özel anahtar mevcut değildir.</translation> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation>Mesajın imzalanması başarısız oldu.</translation> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation>Mesaj imzalandı.</translation> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation>İmzanın kodu çözülemedi.</translation> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation>İmzayı kontrol edip tekrar deneyiniz.</translation> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation>İmza mesajın hash değeri ile eşleşmedi.</translation> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation>Mesaj doğrulaması başarısız oldu.</translation> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation>Mesaj doğrulandı.</translation> </message> </context> <context> <name>SplashScreen</name> <message> <location filename="../splashscreen.cpp" line="+22"/> <source>The Litecoin developers</source> <translation>Licoin geliştiricileri</translation> </message> 
<message> <location line="+1"/> <source>[testnet]</source> <translation>[testnet]</translation> </message> </context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+20"/> <source>Open until %1</source> <translation>%1 değerine dek açık</translation> </message> <message> <location line="+6"/> <source>%1/offline</source> <translation>%1/çevrim dışı</translation> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation>%1/doğrulanmadı</translation> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation>%1 teyit</translation> </message> <message> <location line="+18"/> <source>Status</source> <translation>Durum</translation> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation><numerusform>, %n düğüm vasıtasıyla yayınlandı</numerusform><numerusform>, %n düğüm vasıtasıyla yayınlandı</numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation>Tarih</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation>Kaynak</translation> </message> <message> <location line="+0"/> <source>Generated</source> <translation>Oluşturuldu</translation> </message> <message> <location line="+5"/> <location line="+17"/> <source>From</source> <translation>Gönderen</translation> </message> <message> <location line="+1"/> <location line="+22"/> <location line="+58"/> <source>To</source> <translation>Alıcı</translation> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation>kendi adresiniz</translation> </message> <message> <location line="-2"/> <source>label</source> <translation>etiket</translation> </message> <message> <location line="+37"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation>Gider</translation> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation><numerusform>%n ek blok sonrasında olgunlaşacak</numerusform><numerusform>%n ek blok sonrasında olgunlaşacak</numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation>kabul edilmedi</translation> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation>Gelir</translation> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation>Muamele ücreti</translation> </message> <message> <location line="+16"/> <source>Net amount</source> <translation>Net miktar</translation> </message> <message> <location line="+6"/> <source>Message</source> <translation>Mesaj</translation> </message> <message> <location line="+2"/> <source>Comment</source> <translation>Yorum</translation> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation>Muamele tanımlayıcı</translation> </message> <message> <location line="+3"/> <source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. 
This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation>Oluşturulan Licoin&apos;lerin harcanabilmelerinden önce 120 blok beklemeleri gerekmektedir. Bu blok, oluşturduğunuzda blok zincirine eklenmesi için ağda yayınlandı. Zincire eklenmesi başarısız olursa, durumu &quot;kabul edilmedi&quot; olarak değiştirilecek ve harcanamayacaktır. Bu, bazen başka bir düğüm sizden birkaç saniye önce ya da sonra blok oluşturursa meydana gelebilir.</translation> </message> <message> <location line="+7"/> <source>Debug information</source> <translation>Hata ayıklama verileri</translation> </message> <message> <location line="+8"/> <source>Transaction</source> <translation>Muamele</translation> </message> <message> <location line="+3"/> <source>Inputs</source> <translation>Girdiler</translation> </message> <message> <location line="+23"/> <source>Amount</source> <translation>Miktar</translation> </message> <message> <location line="+1"/> <source>true</source> <translation>doğru</translation> </message> <message> <location line="+0"/> <source>false</source> <translation>yanlış</translation> </message> <message> <location line="-209"/> <source>, has not been successfully broadcast yet</source> <translation>, henüz başarılı bir şekilde yayınlanmadı</translation> </message> <message numerus="yes"> <location line="-35"/> <source>Open for %n more block(s)</source> <translation><numerusform>%n ilâve blok için açık</numerusform><numerusform>%n ilâve blok için açık</numerusform></translation> </message> <message> <location line="+70"/> <source>unknown</source> <translation>bilinmiyor</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation>Muamele detayları</translation> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation>Bu pano muamelenin ayrıntılı açıklamasını gösterir</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+225"/> <source>Date</source> <translation>Tarih</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation>Tür</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Adres</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation>Miktar</translation> </message> <message numerus="yes"> <location line="+57"/> <source>Open for %n more block(s)</source> <translation><numerusform>%n ilâve blok için açık</numerusform><numerusform>%n ilâve blok için açık</numerusform></translation> </message> <message> <location line="+3"/> <source>Open until %1</source> <translation>%1 değerine dek açık</translation> </message> <message> <location line="+3"/> <source>Offline (%1 confirmations)</source> <translation>Çevrimdışı (%1 teyit)</translation> </message> <message> <location line="+3"/> <source>Unconfirmed (%1 of %2 confirmations)</source> <translation>Doğrulanmadı (%1 (toplam %2 üzerinden) teyit)</translation> </message> <message> <location line="+3"/> <source>Confirmed (%1 confirmations)</source> <translation>Doğrulandı (%1 teyit)</translation> </message> <message numerus="yes"> <location line="+8"/> <source>Mined balance will be available when it matures in %n more block(s)</source> <translation><numerusform>Madenden çıkarılan 
bakiye %n ek blok sonrasında olgunlaştığında kullanılabilecektir</numerusform><numerusform>Madenden çıkarılan bakiye %n ek blok sonrasında olgunlaştığında kullanılabilecektir</numerusform></translation> </message> <message> <location line="+5"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation>Bu blok başka hiçbir düğüm tarafından alınmamıştır ve muhtemelen kabul edilmeyecektir!</translation> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation>Oluşturuldu ama kabul edilmedi</translation> </message> <message> <location line="+43"/> <source>Received with</source> <translation>Şununla alındı</translation> </message> <message> <location line="+2"/> <source>Received from</source> <translation>Alındığı kişi</translation> </message> <message> <location line="+3"/> <source>Sent to</source> <translation>Gönderildiği adres</translation> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation>Kendinize ödeme</translation> </message> <message> <location line="+2"/> <source>Mined</source> <translation>Madenden çıkarılan</translation> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation>(mevcut değil)</translation> </message> <message> <location line="+199"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation>Muamele durumu. Doğrulama sayısını görüntülemek için imleci bu alanda tutunuz.</translation> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation>Muamelenin alındığı tarih ve zaman.</translation> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation>Muamele türü.</translation> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation>Muamelenin alıcı adresi.</translation> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation>Bakiyeden alınan ya da bakiyeye eklenen miktar.</translation> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+52"/> <location line="+16"/> <source>All</source> <translation>Hepsi</translation> </message> <message> <location line="-15"/> <source>Today</source> <translation>Bugün</translation> </message> <message> <location line="+1"/> <source>This week</source> <translation>Bu hafta</translation> </message> <message> <location line="+1"/> <source>This month</source> <translation>Bu ay</translation> </message> <message> <location line="+1"/> <source>Last month</source> <translation>Geçen ay</translation> </message> <message> <location line="+1"/> <source>This year</source> <translation>Bu sene</translation> </message> <message> <location line="+1"/> <source>Range...</source> <translation>Aralık...</translation> </message> <message> <location line="+11"/> <source>Received with</source> <translation>Şununla alınan</translation> </message> <message> <location line="+2"/> <source>Sent to</source> <translation>Gönderildiği adres</translation> </message> <message> <location line="+2"/> <source>To yourself</source> <translation>Kendinize</translation> </message> <message> <location line="+1"/> <source>Mined</source> <translation>Oluşturulan</translation> </message> <message> <location line="+1"/> <source>Other</source> <translation>Diğer</translation> </message> <message> 
<location line="+7"/> <source>Enter address or label to search</source> <translation>Aranacak adres ya da etiket giriniz</translation> </message> <message> <location line="+7"/> <source>Min amount</source> <translation>Asgari miktar</translation> </message> <message> <location line="+34"/> <source>Copy address</source> <translation>Adresi kopyala</translation> </message> <message> <location line="+1"/> <source>Copy label</source> <translation>Etiketi kopyala</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Miktarı kopyala</translation> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation>Muamele kimliğini kopyala</translation> </message> <message> <location line="+1"/> <source>Edit label</source> <translation>Etiketi düzenle</translation> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation>Muamele detaylarını göster</translation> </message> <message> <location line="+139"/> <source>Export Transaction Data</source> <translation>Muamele verilerini dışa aktar</translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Virgülle ayrılmış değerler dosyası (*.csv)</translation> </message> <message> <location line="+8"/> <source>Confirmed</source> <translation>Doğrulandı</translation> </message> <message> <location line="+1"/> <source>Date</source> <translation>Tarih</translation> </message> <message> <location line="+1"/> <source>Type</source> <translation>Tür</translation> </message> <message> <location line="+1"/> <source>Label</source> <translation>Etiket</translation> </message> <message> <location line="+1"/> <source>Address</source> <translation>Adres</translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation>Miktar</translation> </message> <message> <location line="+1"/> <source>ID</source> <translation>Tanımlayıcı</translation> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation>Dışa aktarımda hata oluştu</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>%1 dosyasına yazılamadı.</translation> </message> <message> <location line="+100"/> <source>Range:</source> <translation>Aralık:</translation> </message> <message> <location line="+8"/> <source>to</source> <translation>ilâ</translation> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+193"/> <source>Send Coins</source> <translation>Licoin yolla</translation> </message> </context> <context> <name>WalletView</name> <message> <location filename="../walletview.cpp" line="+42"/> <source>&amp;Export</source> <translation>&amp;Dışa aktar</translation> </message> <message> <location line="+1"/> <source>Export the data in the current tab to a file</source> <translation>Güncel sekmedeki verileri bir dosyaya aktar</translation> </message> <message> <location line="+193"/> <source>Backup Wallet</source> <translation>Cüzdanı yedekle</translation> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation>Cüzdan verileri (*.dat)</translation> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation>Yedekleme başarısız oldu</translation> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation>Cüzdanı değişik bir 
konuma kaydetmek denenirken bir hata meydana geldi.</translation> </message> <message> <location line="+4"/> <source>Backup Successful</source> <translation>Yedekleme başarılı</translation> </message> <message> <location line="+0"/> <source>The wallet data was successfully saved to the new location.</source> <translation>Cüzdan verileri başarılı bir şekilde yeni konuma kaydedildi.</translation> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+94"/> <source>Litecoin version</source> <translation>Licoin sürümü</translation> </message> <message> <location line="+102"/> <source>Usage:</source> <translation>Kullanım:</translation> </message> <message> <location line="-29"/> <source>Send command to -server or litecoind</source> <translation>-server ya da Licoind&apos;ye komut gönder</translation> </message> <message> <location line="-23"/> <source>List commands</source> <translation>Komutları listele</translation> </message> <message> <location line="-12"/> <source>Get help for a command</source> <translation>Bir komut için yardım al</translation> </message> <message> <location line="+24"/> <source>Options:</source> <translation>Seçenekler:</translation> </message> <message> <location line="+24"/> <source>Specify configuration file (default: litecoin.conf)</source> <translation>Yapılandırma dosyası belirt (varsayılan: Licoin.conf)</translation> </message> <message> <location line="+3"/> <source>Specify pid file (default: litecoind.pid)</source> <translation>Pid dosyası belirt (varsayılan: Licoind.pid)</translation> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation>Veri dizinini belirt</translation> </message> <message> <location line="-9"/> <source>Set database cache size in megabytes (default: 25)</source> <translation>Veritabanı önbellek boyutunu megabayt olarak belirt (varsayılan: 25)</translation> </message> <message> <location line="-28"/> <source>Listen for connections on &lt;port&gt; (default: 9333 or testnet: 19333)</source> <translation>Bağlantılar için dinlenecek &lt;port&gt; (varsayılan: 9333 ya da testnet: 19333)</translation> </message> <message> <location line="+5"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation>Eşler ile en çok &lt;n&gt; adet bağlantı kur (varsayılan: 125)</translation> </message> <message> <location line="-48"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation>Eş adresleri elde etmek için bir düğüme bağlan ve ardından bağlantıyı kes</translation> </message> <message> <location line="+82"/> <source>Specify your own public address</source> <translation>Kendi genel adresinizi tanımlayın</translation> </message> <message> <location line="+3"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation>Aksaklık gösteren eşlerle bağlantıyı kesme sınırı (varsayılan: 100)</translation> </message> <message> <location line="-134"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation>Aksaklık gösteren eşlerle yeni bağlantıları engelleme süresi, saniye olarak (varsayılan: 86400)</translation> </message> <message> <location line="-29"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation>IPv4 üzerinde dinlemek için %u numaralı RPC portunun kurulumu sırasında hata meydana geldi: %s</translation> </message> <message> <location 
line="+27"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 9332 or testnet: 19332)</source> <translation>JSON-RPC bağlantılarını &lt;port&gt; üzerinde dinle (varsayılan: 9332 veya tesnet: 19332)</translation> </message> <message> <location line="+37"/> <source>Accept command line and JSON-RPC commands</source> <translation>Konut satırı ve JSON-RPC komutlarını kabul et</translation> </message> <message> <location line="+76"/> <source>Run in the background as a daemon and accept commands</source> <translation>Arka planda daemon (servis) olarak çalış ve komutları kabul et</translation> </message> <message> <location line="+37"/> <source>Use the test network</source> <translation>Deneme şebekesini kullan</translation> </message> <message> <location line="-112"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation>Dışarıdan gelen bağlantıları kabul et (varsayılan: -proxy veya -connect yoksa 1)</translation> </message> <message> <location line="-80"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=litecoinrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;Litecoin Alert&quot; [email protected] </source> <translation>%s, şu yapılandırma dosyasında rpc parolası belirtmeniz gerekir: %s Aşağıdaki rastgele oluşturulan parolayı kullanmanız tavsiye edilir: rpcuser=litecoinrpc rpcpassword=%s (bu parolayı hatırlamanız gerekli değildir) Kullanıcı ismi ile parolanın FARKLI olmaları gerekir. Dosya mevcut değilse, sadece sahibi için okumayla sınırlı izin ile oluşturunuz. Sorunlar hakkında bildiri almak için alertnotify unsurunu ayarlamanız tavsiye edilir; mesela: alertnotify=echo %%s | mail -s &quot;Litecoin Alert&quot; [email protected] </translation> </message> <message> <location line="+17"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation>IPv6 üzerinde dinlemek için %u numaralı RPC portu kurulurken bir hata meydana geldi, IPv4&apos;e dönülüyor: %s</translation> </message> <message> <location line="+3"/> <source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source> <translation>Belirtilen adrese bağlan ve daima ondan dinle. IPv6 için [makine]:port yazımını kullanınız</translation> </message> <message> <location line="+3"/> <source>Cannot obtain a lock on data directory %s. Litecoin is probably already running.</source> <translation>%s veri dizininde kilit elde edilemedi. Licoin muhtemelen hâlihazırda çalışmaktadır.</translation> </message> <message> <location line="+3"/> <source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation>Hata: Muamele reddedildi! Cüzdanınızdaki madenî paraların bazıları zaten harcanmış olduğunda bu meydana gelebilir. 
Örneğin wallet.dat dosyasının bir kopyasını kullandıysanız ve kopyada para harcandığında ancak burada harcandığı işaretlenmediğinde.</translation> </message> <message> <location line="+4"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source> <translation>Hata: Muamelenin miktarı, karmaşıklığı ya da yakın geçmişte alınan fonların kullanılması nedeniyle bu muamele en az %s tutarında ücret gerektirmektedir!</translation> </message> <message> <location line="+3"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation>İlgili bir uyarı alındığında komut çalıştır (komuttaki %s mesaj ile değiştirilecektir)</translation> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation>Bir cüzdan muamelesi değiştiğinde komutu çalıştır (komuttaki %s TxID ile değiştirilecektir)</translation> </message> <message> <location line="+11"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation>Yüksek öncelikli/düşük ücretli muamelelerin boyutunu bayt olarak tanımla (varsayılan: 27000)</translation> </message> <message> <location line="+6"/> <source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source> <translation>Bu yayın öncesi bir deneme sürümüdür - tüm riski siz üstlenmiş olursunuz - Licoin oluşturmak ya da ticari uygulamalar için kullanmayınız</translation> </message> <message> <location line="+5"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation>Uyarı: -paytxfee çok yüksek bir değere ayarlanmış! Bu, muamele gönderirseniz ödeyeceğiniz muamele ücretidir.</translation> </message> <message> <location line="+3"/> <source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source> <translation>Uyarı: Görüntülenen muameleler doğru olmayabilir! Sizin ya da diğer düğümlerin güncelleme yapması gerekebilir.</translation> </message> <message> <location line="+3"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong Litecoin will not work properly.</source> <translation>Uyarı: Lütfen bilgisayarınızın tarih ve saatinin doğru olup olmadığını kontrol ediniz! Saatiniz doğru değilse Licoin gerektiği gibi çalışamaz.</translation> </message> <message> <location line="+3"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>Uyarı: wallet.dat dosyasının okunması sırasında bir hata meydana geldi! Tüm anahtarlar doğru bir şekilde okundu, ancak muamele verileri ya da adres defteri unsurları hatalı veya eksik olabilir.</translation> </message> <message> <location line="+3"/> <source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation>Uyarı: wallet.dat bozuk, veriler geri kazanıldı! 
Özgün wallet.dat, wallet.{zamandamgası}.bak olarak %s klasörüne kaydedildi; bakiyeniz ya da muameleleriniz yanlışsa bir yedeklemeden tekrar yüklemeniz gerekir.</translation> </message> <message> <location line="+14"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation>Bozuk bir wallet.dat dosyasından özel anahtarları geri kazanmayı dene</translation> </message> <message> <location line="+2"/> <source>Block creation options:</source> <translation>Blok oluşturma seçenekleri:</translation> </message> <message> <location line="+5"/> <source>Connect only to the specified node(s)</source> <translation>Sadece belirtilen düğüme veya düğümlere bağlan</translation> </message> <message> <location line="+3"/> <source>Corrupted block database detected</source> <translation>Bozuk blok veritabanı tespit edildi</translation> </message> <message> <location line="+1"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation>Kendi IP adresini keşfet (varsayılan: dinlenildiğinde ve -externalip yoksa 1)</translation> </message> <message> <location line="+1"/> <source>Do you want to rebuild the block database now?</source> <translation>Blok veritabanını şimdi yeniden inşa etmek istiyor musunuz?</translation> </message> <message> <location line="+2"/> <source>Error initializing block database</source> <translation>Blok veritabanını başlatılırken bir hata meydana geldi</translation> </message> <message> <location line="+1"/> <source>Error initializing wallet database environment %s!</source> <translation>%s cüzdan veritabanı ortamının başlatılmasında hata meydana geldi!</translation> </message> <message> <location line="+1"/> <source>Error loading block database</source> <translation>Blok veritabanının yüklenmesinde hata</translation> </message> <message> <location line="+4"/> <source>Error opening block database</source> <translation>Blok veritabanının açılışı sırasında hata</translation> </message> <message> <location line="+2"/> <source>Error: Disk space is low!</source> <translation>Hata: Disk alanı düşük!</translation> </message> <message> <location line="+1"/> <source>Error: Wallet locked, unable to create transaction!</source> <translation>Hata: Cüzdan kilitli, muamele oluşturulamadı!</translation> </message> <message> <location line="+1"/> <source>Error: system error: </source> <translation>Hata: sistem hatası:</translation> </message> <message> <location line="+1"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>Herhangi bir portun dinlenmesi başarısız oldu. 
Bunu istiyorsanız -listen=0 seçeneğini kullanınız.</translation> </message> <message> <location line="+1"/> <source>Failed to read block info</source> <translation>Blok verileri okunamadı</translation> </message> <message> <location line="+1"/> <source>Failed to read block</source> <translation>Blok okunamadı</translation> </message> <message> <location line="+1"/> <source>Failed to sync block index</source> <translation>Blok indeksi eşleştirilemedi</translation> </message> <message> <location line="+1"/> <source>Failed to write block index</source> <translation>Blok indeksi yazılamadı</translation> </message> <message> <location line="+1"/> <source>Failed to write block info</source> <translation>Blok verileri yazılamadı</translation> </message> <message> <location line="+1"/> <source>Failed to write block</source> <translation>Blok yazılamadı</translation> </message> <message> <location line="+1"/> <source>Failed to write file info</source> <translation>Dosya verileri yazılamadı</translation> </message> <message> <location line="+1"/> <source>Failed to write to coin database</source> <translation>Madenî para veritabanına yazılamadı</translation> </message> <message> <location line="+1"/> <source>Failed to write transaction index</source> <translation>Muamele indeksi yazılamadı</translation> </message> <message> <location line="+1"/> <source>Failed to write undo data</source> <translation>Geri alma verilerinin yazılamadı</translation> </message> <message> <location line="+2"/> <source>Find peers using DNS lookup (default: 1 unless -connect)</source> <translation>Eşleri DNS araması vasıtasıyla bul (varsayılan: 1, eğer -connect kullanılmadıysa)</translation> </message> <message> <location line="+1"/> <source>Generate coins (default: 0)</source> <translation>Licoin oluştur (varsayılan: 0)</translation> </message> <message> <location line="+2"/> <source>How many blocks to check at startup (default: 288, 0 = all)</source> <translation>Başlangıçta kontrol edilecek blok sayısı (varsayılan: 288, 0 = hepsi)</translation> </message> <message> <location line="+1"/> <source>How thorough the block verification is (0-4, default: 3)</source> <translation>Blok kontrolünün ne kadar derin olacağı (0 ilâ 4, varsayılan: 3)</translation> </message> <message> <location line="+19"/> <source>Not enough file descriptors available.</source> <translation>Kafi derecede dosya tanımlayıcıları mevcut değil.</translation> </message> <message> <location line="+8"/> <source>Rebuild block chain index from current blk000??.dat files</source> <translation>Blok zinciri indeksini güncel blk000??.dat dosyalarından tekrar inşa et</translation> </message> <message> <location line="+16"/> <source>Set the number of threads to service RPC calls (default: 4)</source> <translation>RPC aramaları için iş parçacığı sayısını belirle (varsayılan: 4)</translation> </message> <message> <location line="+26"/> <source>Verifying blocks...</source> <translation>Bloklar kontrol ediliyor...</translation> </message> <message> <location line="+1"/> <source>Verifying wallet...</source> <translation>Cüzdan kontrol ediliyor...</translation> </message> <message> <location line="-69"/> <source>Imports blocks from external blk000??.dat file</source> <translation>Harici blk000??.dat dosyasından blokları içe aktarır</translation> </message> <message> <location line="-76"/> <source>Set the number of script verification threads (up to 16, 0 = auto, &lt;0 = leave that many cores free, default: 0)</source> <translation>Betik kontrolü iş parçacığı sayısını 
belirt (azami 16, 0 = otomatik, &lt;0 = bu sayıda çekirdeği boş bırak, varsayılan: 0)</translation> </message> <message> <location line="+77"/> <source>Information</source> <translation>Bilgi</translation> </message> <message> <location line="+3"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation>Geçersiz -tor adresi: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Invalid amount for -minrelaytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>-minrelaytxfee=&lt;amount&gt; için geçersiz meblağ: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Invalid amount for -mintxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>-mintxfee=&lt;amount&gt; için geçersiz meblağ: &apos;%s&apos;</translation> </message> <message> <location line="+8"/> <source>Maintain a full transaction index (default: 0)</source> <translation>Muamelelerin tamamının indeksini tut (varsayılan: 0)</translation> </message> <message> <location line="+2"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation>Bağlantı başına azami alım tamponu, &lt;n&gt;*1000 bayt (varsayılan: 5000)</translation> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation>Bağlantı başına azami yollama tamponu, &lt;n&gt;*1000 bayt (varsayılan: 1000)</translation> </message> <message> <location line="+2"/> <source>Only accept block chain matching built-in checkpoints (default: 1)</source> <translation>Sadece yerleşik kontrol noktalarıyla eşleşen blok zincirini kabul et (varsayılan: 1)</translation> </message> <message> <location line="+1"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation>Sadece &lt;net&gt; şebekesindeki düğümlere bağlan (IPv4, IPv6 ya da Tor)</translation> </message> <message> <location line="+2"/> <source>Output extra debugging information. Implies all other -debug* options</source> <translation>İlâve hata ayıklama verileri çıkart. 
Diğer tüm -debug* seçeneklerini ima eder</translation> </message> <message> <location line="+1"/> <source>Output extra network debugging information</source> <translation>İlâve şebeke hata ayıklama verileri çıkart</translation> </message> <message> <location line="+2"/> <source>Prepend debug output with timestamp</source> <translation>Hata ayıklama çıktısına tarih ön ekleri ilâve et</translation> </message> <message> <location line="+5"/> <source>SSL options: (see the Litecoin Wiki for SSL setup instructions)</source> <translation> SSL seçenekleri: (SSL kurulum bilgisi için Licoin vikisine bakınız)</translation> </message> <message> <location line="+1"/> <source>Select the version of socks proxy to use (4-5, default: 5)</source> <translation>Kullanılacak socks vekil sunucu sürümünü seç (4-5, varsayılan: 5)</translation> </message> <message> <location line="+3"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation>Trace/hata ayıklama verilerini debug.log dosyası yerine konsola gönder</translation> </message> <message> <location line="+1"/> <source>Send trace/debug info to debugger</source> <translation>Hata ayıklayıcıya -debugger- trace/hata ayıklama verileri gönder</translation> </message> <message> <location line="+5"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation>Bayt olarak azami blok boyutunu tanımla (varsayılan: 250000)</translation> </message> <message> <location line="+1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation>Bayt olarak asgari blok boyutunu tanımla (varsayılan: 0)</translation> </message> <message> <location line="+2"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation>İstemci başlatıldığında debug.log dosyasını küçült (varsayılan: -debug bulunmadığında 1)</translation> </message> <message> <location line="+1"/> <source>Signing transaction failed</source> <translation>Muamelenin imzalanması başarısız oldu</translation> </message> <message> <location line="+2"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation>Bağlantı zaman aşım süresini milisaniye olarak belirt (varsayılan: 5000)</translation> </message> <message> <location line="+4"/> <source>System error: </source> <translation>Sistem hatası:</translation> </message> <message> <location line="+4"/> <source>Transaction amount too small</source> <translation>Muamele meblağı çok düşük</translation> </message> <message> <location line="+1"/> <source>Transaction amounts must be positive</source> <translation>Muamele tutarının pozitif olması lazımdır</translation> </message> <message> <location line="+1"/> <source>Transaction too large</source> <translation>Muamele çok büyük</translation> </message> <message> <location line="+7"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation>Dinlenecek portu haritalamak için UPnP kullan (varsayılan: 0)</translation> </message> <message> <location line="+1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation>Dinlenecek portu haritalamak için UPnP kullan (varsayılan: dinlenildiğinde 1)</translation> </message> <message> <location line="+1"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation>Gizli tor servislerine erişmek için vekil sunucu kullan (varsayılan: -proxy ile aynısı)</translation> </message> <message> <location line="+2"/> <source>Username for JSON-RPC connections</source> 
<translation>JSON-RPC bağlantıları için kullanıcı ismi</translation> </message> <message> <location line="+4"/> <source>Warning</source> <translation>Uyarı</translation> </message> <message> <location line="+1"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation>Uyarı: Bu sürüm çok eskidir, güncellemeniz gerekir!</translation> </message> <message> <location line="+1"/> <source>You need to rebuild the databases using -reindex to change -txindex</source> <translation>-txindex&apos;i değiştirmek için veritabanlarını -reindex kullanarak yeniden inşa etmeniz gerekir.</translation> </message> <message> <location line="+1"/> <source>wallet.dat corrupt, salvage failed</source> <translation>wallet.dat bozuk, geri kazanım başarısız oldu</translation> </message> <message> <location line="-50"/> <source>Password for JSON-RPC connections</source> <translation>JSON-RPC bağlantıları için parola</translation> </message> <message> <location line="-67"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation>Belirtilen İP adresinden JSON-RPC bağlantılarını kabul et</translation> </message> <message> <location line="+76"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation>Şu &lt;ip&gt; adresinde (varsayılan: 127.0.0.1) çalışan düğüme komut yolla</translation> </message> <message> <location line="-120"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation>En iyi blok değiştiğinde komutu çalıştır (komut için %s parametresi blok hash değeri ile değiştirilecektir)</translation> </message> <message> <location line="+147"/> <source>Upgrade wallet to latest format</source> <translation>Cüzdanı en yeni biçime güncelle</translation> </message> <message> <location line="-21"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation>Anahtar alan boyutunu &lt;n&gt; değerine ayarla (varsayılan: 100)</translation> </message> <message> <location line="-12"/> <source>Rescan the block chain for missing wallet transactions</source> <translation>Blok zincirini eksik cüzdan muameleleri için tekrar tara</translation> </message> <message> <location line="+35"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation>JSON-RPC bağlantıları için OpenSSL (https) kullan</translation> </message> <message> <location line="-26"/> <source>Server certificate file (default: server.cert)</source> <translation>Sunucu sertifika dosyası (varsayılan: server.cert)</translation> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation>Sunucu özel anahtarı (varsayılan: server.pem)</translation> </message> <message> <location line="-151"/> <source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source> <translation>Kabul edilebilir şifreler (varsayılan: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation> </message> <message> <location line="+165"/> <source>This help message</source> <translation>Bu yardım mesajı</translation> </message> <message> <location line="+6"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation>Bu bilgisayarda %s unsuruna bağlanılamadı. 
(bind şu hatayı iletti: %d, %s)</translation> </message> <message> <location line="-91"/> <source>Connect through socks proxy</source> <translation>Socks vekil sunucusu vasıtasıyla bağlan</translation> </message> <message> <location line="-10"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation>-addnode, -seednode ve -connect için DNS aramalarına izin ver</translation> </message> <message> <location line="+55"/> <source>Loading addresses...</source> <translation>Adresler yükleniyor...</translation> </message> <message> <location line="-35"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation>wallet.dat dosyasının yüklenmesinde hata oluştu: bozuk cüzdan</translation> </message> <message> <location line="+1"/> <source>Error loading wallet.dat: Wallet requires newer version of Litecoin</source> <translation>wallet.dat dosyasının yüklenmesinde hata oluştu: cüzdanın daha yeni bir Licoin sürümüne ihtiyacı var</translation> </message> <message> <location line="+93"/> <source>Wallet needed to be rewritten: restart Litecoin to complete</source> <translation>Cüzdanın tekrar yazılması gerekiyordu: işlemi tamamlamak için Licoin&apos;i yeniden başlatınız</translation> </message> <message> <location line="-95"/> <source>Error loading wallet.dat</source> <translation>wallet.dat dosyasının yüklenmesinde hata oluştu</translation> </message> <message> <location line="+28"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation>Geçersiz -proxy adresi: &apos;%s&apos;</translation> </message> <message> <location line="+56"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation>-onlynet için bilinmeyen bir şebeke belirtildi: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown -socks proxy version requested: %i</source> <translation>Bilinmeyen bir -socks vekil sürümü talep edildi: %i</translation> </message> <message> <location line="-96"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation>-bind adresi çözümlenemedi: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation>-externalip adresi çözümlenemedi: &apos;%s&apos;</translation> </message> <message> <location line="+44"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>-paytxfee=&lt;miktar&gt; için geçersiz miktar: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Invalid amount</source> <translation>Geçersiz miktar</translation> </message> <message> <location line="-6"/> <source>Insufficient funds</source> <translation>Yetersiz bakiye</translation> </message> <message> <location line="+10"/> <source>Loading block index...</source> <translation>Blok indeksi yükleniyor...</translation> </message> <message> <location line="-57"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation>Bağlanılacak düğüm ekle ve bağlantıyı zinde tutmaya çalış</translation> </message> <message> <location line="-25"/> <source>Unable to bind to %s on this computer. Litecoin is probably already running.</source> <translation>Bu bilgisayarda %s unsuruna bağlanılamadı. 
Licoin muhtemelen hâlihazırda çalışmaktadır.</translation> </message> <message> <location line="+64"/> <source>Fee per KB to add to transactions you send</source> <translation>Yolladığınız muameleler için eklenecek KB başı ücret</translation> </message> <message> <location line="+19"/> <source>Loading wallet...</source> <translation>Cüzdan yükleniyor...</translation> </message> <message> <location line="-52"/> <source>Cannot downgrade wallet</source> <translation>Cüzdan eski biçime geri alınamaz</translation> </message> <message> <location line="+3"/> <source>Cannot write default address</source> <translation>Varsayılan adres yazılamadı</translation> </message> <message> <location line="+64"/> <source>Rescanning...</source> <translation>Yeniden tarama...</translation> </message> <message> <location line="-57"/> <source>Done loading</source> <translation>Yükleme tamamlandı</translation> </message> <message> <location line="+82"/> <source>To use the %s option</source> <translation>%s seçeneğini kullanmak için</translation> </message> <message> <location line="-74"/> <source>Error</source> <translation>Hata</translation> </message> <message> <location line="-31"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation>rpcpassword=&lt;parola&gt; şu yapılandırma dosyasında belirtilmelidir: %s Dosya mevcut değilse, sadece sahibi için okumayla sınırlı izin ile oluşturunuz.</translation> </message> </context> </TS><|fim▁end|>
<translation>Girilen parolalar birbirleriyle uyumlu değil.</translation> </message>
<|file_name|>conf.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # # example_project documentation build configuration file, created by # sphinx-quickstart on Wed Aug 19 10:27:46 2009. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os import datetime # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'example_project' copyright = u'%d, Lincoln Loop' % datetime.date.today().year # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.0' # The full version, including alpha/beta/rc tags. release = '1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". 
#html_title = None<|fim▁hole|> # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'example_projectdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'example_project.tex', u'example_project Documentation', u'Lincoln Loop', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True<|fim▁end|>
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
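A minimal sketch of how a conf.py like the one above gets consumed, driving a Sphinx build programmatically. The docs/ layout is an assumption; "sphinx-build -b html docs docs/_build/html" would be the equivalent CLI call:

# build_docs.py -- illustrative only; assumes conf.py and index.rst sit in docs/
from sphinx.application import Sphinx

app = Sphinx(
    srcdir='docs',                      # .rst sources
    confdir='docs',                     # directory containing conf.py
    outdir='docs/_build/html',          # rendered HTML output
    doctreedir='docs/_build/doctrees',  # pickled doctree cache
    buildername='html',
)
app.build()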
<|file_name|>test_tabs.py<|end_file_name|><|fim▁begin|>"""
Test cases for tabs.
"""
from mock import MagicMock, Mock, patch

from courseware.courses import get_course_by_id
from courseware.views import get_static_tab_contents
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from student.tests.factories import UserFactory
from xmodule.tabs import CourseTabList
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from courseware.tests.helpers import get_request_for_user, LoginEnrollmentTestCase
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from opaque_keys.edx.locations import SlashSeparatedCourseKey


@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class StaticTabDateTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
    """Test cases for Static Tab Dates."""

    def setUp(self):
        self.course = CourseFactory.create()
        self.page = ItemFactory.create(
            category="static_tab", parent_location=self.course.location,
            data="OOGIE BLOOGIE", display_name="new_tab"
        )
        self.toy_course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')

    def test_logged_in(self):
        self.setup_user()
        url = reverse('static_tab', args=[self.course.id.to_deprecated_string(), 'new_tab'])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("OOGIE BLOOGIE", resp.content)

    def test_anonymous_user(self):
        url = reverse('static_tab', args=[self.course.id.to_deprecated_string(), 'new_tab'])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn("OOGIE BLOOGIE", resp.content)

    @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
    def test_get_static_tab_contents(self):
        course = get_course_by_id(self.toy_course_key)
        request = get_request_for_user(UserFactory.create())
        tab = CourseTabList.get_tab_by_slug(course.tabs, 'resources')

        # Test render works okay
        tab_content = get_static_tab_contents(request, course, tab)
        self.assertIn(self.toy_course_key.to_deprecated_string(), tab_content)
        self.assertIn('static_tab', tab_content)

        # Test when render raises an exception
        with patch('courseware.views.get_module') as mock_module_render:<|fim▁hole|>
            )
            static_tab = get_static_tab_contents(request, course, tab)
            self.assertIn("this module is temporarily unavailable", static_tab)


@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class StaticTabDateTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
    # The following XML test course (which lives at common/test/data/2014)
    # is closed; we're testing that tabs still appear when
    # the course is already closed
    xml_course_key = SlashSeparatedCourseKey('edX', 'detached_pages', '2014')

    # this text appears in the test course's tab
    # common/test/data/2014/tabs/8e4cce2b4aaf4ba28b1220804619e41f.html
    xml_data = "static 463139"
    xml_url = "8e4cce2b4aaf4ba28b1220804619e41f"

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_logged_in_xml(self):
        self.setup_user()
        url = reverse('static_tab', args=[self.xml_course_key.to_deprecated_string(), self.xml_url])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.xml_data, resp.content)

    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_anonymous_user_xml(self):
        url = reverse('static_tab', args=[self.xml_course_key.to_deprecated_string(), self.xml_url])
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertIn(self.xml_data, resp.content)<|fim▁end|>
            mock_module_render.return_value = MagicMock(
                render=Mock(side_effect=Exception('Render failed!'))
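The completion above installs a mock whose render() raises, which is what drives the "temporarily unavailable" fallback the test asserts on. A self-contained sketch of that mocking pattern, using the stdlib unittest.mock rather than the older mock backport imported in the file; render_or_fallback() is a hypothetical stand-in for the real error handling inside courseware:

from unittest.mock import MagicMock, Mock

def render_or_fallback(module):
    # Stand-in for the rendering path under test: fall back to a
    # friendly message whenever module.render() blows up.
    try:
        return module.render()
    except Exception:
        return "this module is temporarily unavailable"

failing = MagicMock(render=Mock(side_effect=Exception('Render failed!')))
assert render_or_fallback(failing) == "this module is temporarily unavailable"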
<|file_name|>playback.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
#<|fim▁hole|>
<|file_name|>BreathFirstSearch.java<|end_file_name|><|fim▁begin|>package com.albion.common.graph.algorithms; import com.albion.common.graph.core.v1.Edge; import com.albion.common.graph.core.v1.Graph; import com.albion.common.graph.core.v1.Vertex; import java.util.ArrayList; import java.util.List; public class BreathFirstSearch { public static Vertex locate(Graph graph, Integer source, Integer target){ List<Vertex> queue = new ArrayList<>(); Vertex root = graph.getVertex(source); queue.add(root); while(!queue.isEmpty()){ Vertex v = queue.remove(0); if(v.getId() == target.intValue()){ v.setVisited(true);<|fim▁hole|> List<Edge> edgeList = v.getEdgeList(); for(Edge edge : edgeList){ int vertexId = edge.getY(); Vertex w = graph.getVerticesMap().get(vertexId); if(w.isVisited() == false){ w.setVisited(true); queue.add(w); } } } return null; } }<|fim▁end|>
return v; }
<|file_name|>serverchart.js<|end_file_name|><|fim▁begin|>// Copyright 2007 The Closure Library Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @fileoverview Component for generating chart PNGs using Google Chart Server. * * @see ../demos/serverchart.html */ /** * Namespace for chart functions */ goog.provide('goog.ui.ServerChart'); goog.provide('goog.ui.ServerChart.AxisDisplayType'); goog.provide('goog.ui.ServerChart.ChartType'); goog.provide('goog.ui.ServerChart.EncodingType'); goog.provide('goog.ui.ServerChart.Event'); goog.provide('goog.ui.ServerChart.LegendPosition'); goog.provide('goog.ui.ServerChart.MaximumValue'); goog.provide('goog.ui.ServerChart.MultiAxisAlignment'); goog.provide('goog.ui.ServerChart.MultiAxisType'); goog.provide('goog.ui.ServerChart.UriParam'); goog.provide('goog.ui.ServerChart.UriTooLongEvent'); goog.require('goog.Uri'); goog.require('goog.array'); goog.require('goog.asserts'); goog.require('goog.events.Event'); goog.require('goog.string'); goog.require('goog.ui.Component'); /** * Will construct a chart using Google's chartserver. * * @param {goog.ui.ServerChart.ChartType} type The chart type. * @param {number=} opt_width The width of the chart. * @param {number=} opt_height The height of the chart. * @param {goog.dom.DomHelper=} opt_domHelper Optional DOM Helper. * @param {string=} opt_uri Optional uri used to connect to the chart server, if * different than goog.ui.ServerChart.CHART_SERVER_SCHEME_INDEPENDENT_URI. * @constructor * @extends {goog.ui.Component} */ goog.ui.ServerChart = function(type, opt_width, opt_height, opt_domHelper, opt_uri) { goog.ui.Component.call(this, opt_domHelper); /** * Image URI. * @type {goog.Uri} * @private */ this.uri_ = new goog.Uri( opt_uri || goog.ui.ServerChart.CHART_SERVER_SCHEME_INDEPENDENT_URI); /** * Encoding method for the URI data format. * @type {goog.ui.ServerChart.EncodingType} * @private */ this.encodingType_ = goog.ui.ServerChart.EncodingType.AUTOMATIC; /** * Two-dimensional array of the data sets on the chart. * @type {Array.<Array.<number>>} * @private */ this.dataSets_ = []; /** * Colors for each data set. * @type {Array.<string>} * @private */ this.setColors_ = []; /** * Legend texts for each data set. * @type {Array.<string>} * @private */ this.setLegendTexts_ = []; /** * Labels on the X-axis. * @type {Array.<string>} * @private */ this.xLabels_ = []; /** * Labels on the left along the Y-axis. * @type {Array.<string>} * @private */ this.leftLabels_ = []; /** * Labels on the right along the Y-axis. * @type {Array.<string>} * @private */ this.rightLabels_ = []; /** * Axis type for each multi-axis in the chart. The indices into this array * also work as the reference index for all other multi-axis properties. * @type {Array.<goog.ui.ServerChart.MultiAxisType>} * @private */ this.multiAxisType_ = []; /** * Axis text for each multi-axis in the chart, indexed by the indices from * multiAxisType_ in a sparse array. 
* @type {Object} * @private */ this.multiAxisLabelText_ = {}; /** * Axis position for each multi-axis in the chart, indexed by the indices * from multiAxisType_ in a sparse array. * @type {Object} * @private */ this.multiAxisLabelPosition_ = {}; /** * Axis range for each multi-axis in the chart, indexed by the indices from * multiAxisType_ in a sparse array. * @type {Object} * @private */ this.multiAxisRange_ = {}; /** * Axis style for each multi-axis in the chart, indexed by the indices from * multiAxisType_ in a sparse array. * @type {Object} * @private */ this.multiAxisLabelStyle_ = {}; this.setType(type); this.setSize(opt_width, opt_height); /** * Minimum value for the chart (used for normalization). By default, * this is set to infinity, and is eventually updated to the lowest given * value in the data. The minimum value is then subtracted from all other * values. For a pie chart, subtracting the minimum value does not make * sense, so minValue_ is set to zero because 0 is the additive identity. * @type {number} * @private */ this.minValue_ = this.isPieChart() ? 0 : Infinity; }; goog.inherits(goog.ui.ServerChart, goog.ui.Component); /** * Base scheme-independent URI for the chart renderer. * @type {string} */ goog.ui.ServerChart.CHART_SERVER_SCHEME_INDEPENDENT_URI = '//chart.googleapis.com/chart'; /** * Base HTTP URI for the chart renderer. * @type {string} */ goog.ui.ServerChart.CHART_SERVER_HTTP_URI = 'http://chart.googleapis.com/chart'; /** * Base HTTPS URI for the chart renderer. * @type {string} */ goog.ui.ServerChart.CHART_SERVER_HTTPS_URI = 'https://chart.googleapis.com/chart'; /** * Base URI for the chart renderer. * @type {string} * @deprecated Use * {@link goog.ui.ServerChart.CHART_SERVER_SCHEME_INDEPENDENT_URI}, * {@link goog.ui.ServerChart.CHART_SERVER_HTTP_URI} or * {@link goog.ui.ServerChart.CHART_SERVER_HTTPS_URI} instead. */ goog.ui.ServerChart.CHART_SERVER_URI = goog.ui.ServerChart.CHART_SERVER_HTTP_URI; /** * The 0 - 1.0 ("fraction of the range") value to use when getMinValue() == * getMaxValue(). This determines, for example, the vertical position * of the line in a flat line-chart. * @type {number} */ goog.ui.ServerChart.DEFAULT_NORMALIZATION = 0.5; /** * The upper limit on the length of the chart image URI, after encoding. * If the URI's length equals or exceeds it, goog.ui.ServerChart.UriTooLongEvent * is dispatched on the goog.ui.ServerChart object. * @type {number} * @private */ goog.ui.ServerChart.prototype.uriLengthLimit_ = 2048; /** * Number of gridlines along the X-axis. * @type {number} * @private */ goog.ui.ServerChart.prototype.gridX_ = 0; /** * Number of gridlines along the Y-axis. * @type {number} * @private */ goog.ui.ServerChart.prototype.gridY_ = 0; /** * Maximum value for the chart (used for normalization). The minimum is * declared in the constructor. * @type {number} * @private */ goog.ui.ServerChart.prototype.maxValue_ = -Infinity; /** * Chart title. * @type {?string} * @private */ goog.ui.ServerChart.prototype.title_ = null; /** * Chart title size. * @type {number} * @private */ goog.ui.ServerChart.prototype.titleSize_ = 13.5; /** * Chart title color. * @type {string} * @private */ goog.ui.ServerChart.prototype.titleColor_ = '333333'; /** * Chart legend. * @type {Array.<string>?} * @private */ goog.ui.ServerChart.prototype.legend_ = null; /** * ChartServer supports using data sets to position markers. 
A data set * that is being used for positioning only can be made "invisible", in other * words, the caller can indicate to ChartServer that ordinary chart elements * (e.g. bars in a bar chart) should not be drawn on the data points of the * invisible data set. Such data sets must be provided at the end of the * chd parameter, and if invisible data sets are being used, the chd * parameter must indicate the number of visible data sets. * @type {?number} * @private */ goog.ui.ServerChart.prototype.numVisibleDataSets_ = null; /** * Creates the DOM node (image) needed for the Chart * @override */ goog.ui.ServerChart.prototype.createDom = function() { var size = this.getSize(); this.setElementInternal(this.getDomHelper().createDom(<|fim▁hole|> 'width': size[0], 'height': size[1]})); }; /** * Decorate an image already in the DOM. * Expects the following structure: * <pre> * - img * </pre> * * @param {Element} img Image to decorate. */ goog.ui.ServerChart.prototype.decorateInternal = function(img) { img.src = this.getUri(); this.setElementInternal(img); }; /** * Updates the image if any of the data or settings have changed. */ goog.ui.ServerChart.prototype.updateChart = function() { if (this.getElement()) { this.getElement().src = this.getUri(); } }; /** * Sets the URI of the chart. * * @param {goog.Uri} uri The chart URI. */ goog.ui.ServerChart.prototype.setUri = function(uri) { this.uri_ = uri; }; /** * Returns the URI of the chart. * * @return {goog.Uri} The chart URI. */ goog.ui.ServerChart.prototype.getUri = function() { this.computeDataString_(); return this.uri_; }; /** * Returns the upper limit on the length of the chart image URI, after encoding. * If the URI's length equals or exceeds it, goog.ui.ServerChart.UriTooLongEvent * is dispatched on the goog.ui.ServerChart object. * * @return {number} The chart URI length limit. */ goog.ui.ServerChart.prototype.getUriLengthLimit = function() { return this.uriLengthLimit_; }; /** * Sets the upper limit on the length of the chart image URI, after encoding. * If the URI's length equals or exceeds it, goog.ui.ServerChart.UriTooLongEvent * is dispatched on the goog.ui.ServerChart object. * * @param {number} uriLengthLimit The chart URI length limit. */ goog.ui.ServerChart.prototype.setUriLengthLimit = function(uriLengthLimit) { this.uriLengthLimit_ = uriLengthLimit; }; /** * Sets the 'chg' parameter of the chart Uri. * This is used by various types of charts to specify Grids. * * @param {string} value Value for the 'chg' parameter in the chart Uri. */ goog.ui.ServerChart.prototype.setGridParameter = function(value) { this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.GRID, value); }; /** * Returns the 'chg' parameter of the chart Uri. * This is used by various types of charts to specify Grids. * * @return {string|undefined} The 'chg' parameter of the chart Uri. */ goog.ui.ServerChart.prototype.getGridParameter = function() { return /** @type {string} */ ( this.uri_.getParameterValue(goog.ui.ServerChart.UriParam.GRID)); }; /** * Sets the 'chm' parameter of the chart Uri. * This is used by various types of charts to specify Markers. * * @param {string} value Value for the 'chm' parameter in the chart Uri. */ goog.ui.ServerChart.prototype.setMarkerParameter = function(value) { this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.MARKERS, value); }; /** * Returns the 'chm' parameter of the chart Uri. * This is used by various types of charts to specify Markers. * * @return {string|undefined} The 'chm' parameter of the chart Uri. 
*/ goog.ui.ServerChart.prototype.getMarkerParameter = function() { return /** @type {string} */ ( this.uri_.getParameterValue(goog.ui.ServerChart.UriParam.MARKERS)); }; /** * Sets the 'chp' parameter of the chart Uri. * This is used by various types of charts to specify certain options. * e.g., finance charts use this to designate which line is the 0 axis. * * @param {string|number} value Value for the 'chp' parameter in the chart Uri. */ goog.ui.ServerChart.prototype.setMiscParameter = function(value) { this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.MISC_PARAMS, String(value)); }; /** * Returns the 'chp' parameter of the chart Uri. * This is used by various types of charts to specify certain options. * e.g., finance charts use this to designate which line is the 0 axis. * * @return {string|undefined} The 'chp' parameter of the chart Uri. */ goog.ui.ServerChart.prototype.getMiscParameter = function() { return /** @type {string} */ ( this.uri_.getParameterValue(goog.ui.ServerChart.UriParam.MISC_PARAMS)); }; /** * Enum of chart data encoding types * * @enum {string} */ goog.ui.ServerChart.EncodingType = { AUTOMATIC: '', EXTENDED: 'e', SIMPLE: 's', TEXT: 't' }; /** * Enum of chart types with their short names used by the chartserver. * * @enum {string} */ goog.ui.ServerChart.ChartType = { BAR: 'br', CLOCK: 'cf', CONCENTRIC_PIE: 'pc', FILLEDLINE: 'lr', FINANCE: 'lfi', GOOGLEOMETER: 'gom', HORIZONTAL_GROUPED_BAR: 'bhg', HORIZONTAL_STACKED_BAR: 'bhs', LINE: 'lc', MAP: 't', MAPUSA: 'tuss', MAPWORLD: 'twoc', PIE: 'p', PIE3D: 'p3', RADAR: 'rs', SCATTER: 's', SPARKLINE: 'ls', VENN: 'v', VERTICAL_GROUPED_BAR: 'bvg', VERTICAL_STACKED_BAR: 'bvs', XYLINE: 'lxy' }; /** * Enum of multi-axis types. * * @enum {string} */ goog.ui.ServerChart.MultiAxisType = { X_AXIS: 'x', LEFT_Y_AXIS: 'y', RIGHT_Y_AXIS: 'r', TOP_AXIS: 't' }; /** * Enum of multi-axis alignments. * * @enum {number} */ goog.ui.ServerChart.MultiAxisAlignment = { ALIGN_LEFT: -1, ALIGN_CENTER: 0, ALIGN_RIGHT: 1 }; /** * Enum of legend positions. * * @enum {string} */ goog.ui.ServerChart.LegendPosition = { TOP: 't', BOTTOM: 'b', LEFT: 'l', RIGHT: 'r' }; /** * Enum of line and tick options for an axis. * * @enum {string} */ goog.ui.ServerChart.AxisDisplayType = { LINE_AND_TICKS: 'lt', LINE: 'l', TICKS: 't' }; /** * Enum of chart maximum values in pixels, as listed at: * http://code.google.com/apis/chart/basics.html * * @enum {number} */ goog.ui.ServerChart.MaximumValue = { WIDTH: 1000, HEIGHT: 1000, MAP_WIDTH: 440, MAP_HEIGHT: 220, TOTAL_AREA: 300000 }; /** * Enum of ChartServer URI parameters. * * @enum {string} */ goog.ui.ServerChart.UriParam = { BACKGROUND_FILL: 'chf', BAR_HEIGHT: 'chbh', DATA: 'chd', DATA_COLORS: 'chco', DATA_LABELS: 'chld', DATA_SCALING: 'chds', DIGITAL_SIGNATURE: 'sig', GEOGRAPHICAL_REGION: 'chtm', GRID: 'chg', LABEL_COLORS: 'chlc', LEFT_Y_LABELS: 'chly', LEGEND: 'chdl', LEGEND_POSITION: 'chdlp', LEGEND_TEXTS: 'chdl', LINE_STYLES: 'chls', MARGINS: 'chma', MARKERS: 'chm', MISC_PARAMS: 'chp', MULTI_AXIS_LABEL_POSITION: 'chxp', MULTI_AXIS_LABEL_TEXT: 'chxl', MULTI_AXIS_RANGE: 'chxr', MULTI_AXIS_STYLE: 'chxs', MULTI_AXIS_TYPES: 'chxt', RIGHT_LABELS: 'chlr', RIGHT_LABEL_POSITIONS: 'chlrp', SIZE: 'chs', TITLE: 'chtt', TITLE_FORMAT: 'chts', TYPE: 'cht', X_AXIS_STYLE: 'chx', X_LABELS: 'chl' }; /** * Sets the background fill. * * @param {Array.<Object>} fill An array of background fill specification * objects. 
Each object may have the following properties: * {string} area The area to fill, either 'bg' for background or 'c' for * chart area. The default is 'bg'. * {string} color (required) The color of the background fill. * // TODO(user): Add support for gradient/stripes, which requires * // a different object structure. */ goog.ui.ServerChart.prototype.setBackgroundFill = function(fill) { var value = []; goog.array.forEach(fill, function(spec) { spec.area = spec.area || 'bg'; spec.effect = spec.effect || 's'; value.push([spec.area, spec.effect, spec.color].join(',')); }); value = value.join('|'); this.setParameterValue(goog.ui.ServerChart.UriParam.BACKGROUND_FILL, value); }; /** * Returns the background fill. * * @return {Array.<Object>} An array of background fill specifications. * If the fill specification string is in an unsupported format, the method * returns an empty array. */ goog.ui.ServerChart.prototype.getBackgroundFill = function() { var value = this.uri_.getParameterValue(goog.ui.ServerChart.UriParam.BACKGROUND_FILL); var result = []; if (goog.isDefAndNotNull(value)) { var fillSpecifications = value.split('|'); var valid = true; goog.array.forEach(fillSpecifications, function(spec) { spec = spec.split(','); if (valid && spec[1] == 's') { result.push({area: spec[0], effect: spec[1], color: spec[2]}); } else { // If the format is unsupported, return an empty array. result = []; valid = false; } }); } return result; }; /** * Sets the encoding type. * * @param {goog.ui.ServerChart.EncodingType} type Desired data encoding type. */ goog.ui.ServerChart.prototype.setEncodingType = function(type) { this.encodingType_ = type; }; /** * Gets the encoding type. * * @return {goog.ui.ServerChart.EncodingType} The encoding type. */ goog.ui.ServerChart.prototype.getEncodingType = function() { return this.encodingType_; }; /** * Sets the chart type. * * @param {goog.ui.ServerChart.ChartType} type The desired chart type. */ goog.ui.ServerChart.prototype.setType = function(type) { this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.TYPE, type); }; /** * Returns the chart type. * * @return {goog.ui.ServerChart.ChartType} The chart type. */ goog.ui.ServerChart.prototype.getType = function() { return /** @type {goog.ui.ServerChart.ChartType} */ ( this.uri_.getParameterValue(goog.ui.ServerChart.UriParam.TYPE)); }; /** * Sets the chart size. * * @param {number=} opt_width Optional chart width, defaults to 300. * @param {number=} opt_height Optional chart height, defaults to 150. */ goog.ui.ServerChart.prototype.setSize = function(opt_width, opt_height) { var sizeString = [opt_width || 300, opt_height || 150].join('x'); this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.SIZE, sizeString); }; /** * Returns the chart size. * * @return {Array.<string>} [Width, Height]. */ goog.ui.ServerChart.prototype.getSize = function() { var sizeStr = this.uri_.getParameterValue(goog.ui.ServerChart.UriParam.SIZE); return sizeStr.split('x'); }; /** * Sets the minimum value of the chart. * * @param {number} minValue The minimum value of the chart. */ goog.ui.ServerChart.prototype.setMinValue = function(minValue) { this.minValue_ = minValue; }; /** * @return {number} The minimum value of the chart. */ goog.ui.ServerChart.prototype.getMinValue = function() { return this.minValue_; }; /** * Sets the maximum value of the chart. * * @param {number} maxValue The maximum value of the chart. 
*/ goog.ui.ServerChart.prototype.setMaxValue = function(maxValue) { this.maxValue_ = maxValue; }; /** * @return {number} The maximum value of the chart. */ goog.ui.ServerChart.prototype.getMaxValue = function() { return this.maxValue_; }; /** * Sets the chart margins. * * @param {number} leftMargin The size in pixels of the left margin. * @param {number} rightMargin The size in pixels of the right margin. * @param {number} topMargin The size in pixels of the top margin. * @param {number} bottomMargin The size in pixels of the bottom margin. */ goog.ui.ServerChart.prototype.setMargins = function(leftMargin, rightMargin, topMargin, bottomMargin) { var margins = [leftMargin, rightMargin, topMargin, bottomMargin].join(','); var UriParam = goog.ui.ServerChart.UriParam; this.uri_.setParameterValue(UriParam.MARGINS, margins); }; /** * Sets the number of grid lines along the X-axis. * * @param {number} gridlines The number of X-axis grid lines. */ goog.ui.ServerChart.prototype.setGridX = function(gridlines) { // Need data for this to work. this.gridX_ = gridlines; this.setGrids_(this.gridX_, this.gridY_); }; /** * @return {number} The number of gridlines along the X-axis. */ goog.ui.ServerChart.prototype.getGridX = function() { return this.gridX_; }; /** * Sets the number of grid lines along the Y-axis. * * @param {number} gridlines The number of Y-axis grid lines. */ goog.ui.ServerChart.prototype.setGridY = function(gridlines) { // Need data for this to work. this.gridY_ = gridlines; this.setGrids_(this.gridX_, this.gridY_); }; /** * @return {number} The number of gridlines along the Y-axis. */ goog.ui.ServerChart.prototype.getGridY = function() { return this.gridY_; }; /** * Sets the grids for the chart * * @private * @param {number} x The number of grid lines along the x-axis. * @param {number} y The number of grid lines along the y-axis. */ goog.ui.ServerChart.prototype.setGrids_ = function(x, y) { var gridArray = [x == 0 ? 0 : 100 / x, y == 0 ? 0 : 100 / y]; this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.GRID, gridArray.join(',')); }; /** * Sets the X Labels for the chart. * * @param {Array.<string>} labels The X Labels for the chart. */ goog.ui.ServerChart.prototype.setXLabels = function(labels) { this.xLabels_ = labels; this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.X_LABELS, this.xLabels_.join('|')); }; /** * @return {Array.<string>} The X Labels for the chart. */ goog.ui.ServerChart.prototype.getXLabels = function() { return this.xLabels_; }; /** * @return {boolean} Whether the chart is a bar chart. */ goog.ui.ServerChart.prototype.isBarChart = function() { var type = this.getType(); return type == goog.ui.ServerChart.ChartType.BAR || type == goog.ui.ServerChart.ChartType.HORIZONTAL_GROUPED_BAR || type == goog.ui.ServerChart.ChartType.HORIZONTAL_STACKED_BAR || type == goog.ui.ServerChart.ChartType.VERTICAL_GROUPED_BAR || type == goog.ui.ServerChart.ChartType.VERTICAL_STACKED_BAR; }; /** * @return {boolean} Whether the chart is a pie chart. */ goog.ui.ServerChart.prototype.isPieChart = function() { var type = this.getType(); return type == goog.ui.ServerChart.ChartType.PIE || type == goog.ui.ServerChart.ChartType.PIE3D || type == goog.ui.ServerChart.ChartType.CONCENTRIC_PIE; }; /** * @return {boolean} Whether the chart is a grouped bar chart. 
*/
goog.ui.ServerChart.prototype.isGroupedBarChart = function() {
  var type = this.getType();
  return type == goog.ui.ServerChart.ChartType.HORIZONTAL_GROUPED_BAR ||
      type == goog.ui.ServerChart.ChartType.VERTICAL_GROUPED_BAR;
};


/**
 * @return {boolean} Whether the chart is a horizontal bar chart.
 */
goog.ui.ServerChart.prototype.isHorizontalBarChart = function() {
  var type = this.getType();
  return type == goog.ui.ServerChart.ChartType.BAR ||
      type == goog.ui.ServerChart.ChartType.HORIZONTAL_GROUPED_BAR ||
      type == goog.ui.ServerChart.ChartType.HORIZONTAL_STACKED_BAR;
};


/**
 * @return {boolean} Whether the chart is a line chart.
 */
goog.ui.ServerChart.prototype.isLineChart = function() {
  var type = this.getType();
  return type == goog.ui.ServerChart.ChartType.FILLEDLINE ||
      type == goog.ui.ServerChart.ChartType.LINE ||
      type == goog.ui.ServerChart.ChartType.SPARKLINE ||
      type == goog.ui.ServerChart.ChartType.XYLINE;
};


/**
 * @return {boolean} Whether the chart is a map.
 */
goog.ui.ServerChart.prototype.isMap = function() {
  var type = this.getType();
  return type == goog.ui.ServerChart.ChartType.MAP ||
      type == goog.ui.ServerChart.ChartType.MAPUSA ||
      type == goog.ui.ServerChart.ChartType.MAPWORLD;
};


/**
 * @return {boolean} Whether the chart is a stacked bar chart.
 */
goog.ui.ServerChart.prototype.isStackedBarChart = function() {
  var type = this.getType();
  return type == goog.ui.ServerChart.ChartType.BAR ||
      type == goog.ui.ServerChart.ChartType.HORIZONTAL_STACKED_BAR ||
      type == goog.ui.ServerChart.ChartType.VERTICAL_STACKED_BAR;
};


/**
 * @return {boolean} Whether the chart is a vertical bar chart.
 */
goog.ui.ServerChart.prototype.isVerticalBarChart = function() {
  var type = this.getType();
  return type == goog.ui.ServerChart.ChartType.VERTICAL_GROUPED_BAR ||
      type == goog.ui.ServerChart.ChartType.VERTICAL_STACKED_BAR;
};


/**
 * Sets the Left Labels for the chart.
 * NOTE: The array should start with the lowest value, and then
 *       move progressively up the axis. So if you want labels
 *       from 0 to 100 with 0 at bottom of the graph, then you would
 *       want to pass something like [0,25,50,75,100].
 *
 * @param {Array.<string>} labels The Left Labels for the chart.
 */
goog.ui.ServerChart.prototype.setLeftLabels = function(labels) {
  this.leftLabels_ = labels;
  this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.LEFT_Y_LABELS,
                              this.leftLabels_.reverse().join('|'));
};


/**
 * @return {Array.<string>} The Left Labels for the chart.
 */
goog.ui.ServerChart.prototype.getLeftLabels = function() {
  return this.leftLabels_;
};


/**
 * Sets the given ChartServer parameter.
 *
 * @param {goog.ui.ServerChart.UriParam} key The ChartServer parameter to set.
 * @param {string} value The value to set for the ChartServer parameter.
 */
goog.ui.ServerChart.prototype.setParameterValue = function(key, value) {
  this.uri_.setParameterValue(key, value);
};


/**
 * Removes the given ChartServer parameter.
 *
 * @param {goog.ui.ServerChart.UriParam} key The ChartServer parameter to
 *     remove.
 */
goog.ui.ServerChart.prototype.removeParameter = function(key) {
  this.uri_.removeParameter(key);
};


/**
 * Sets the Right Labels for the chart.
 * NOTE: The array should start with the lowest value, and then
 *       move progressively up the axis. So if you want labels
 *       from 0 to 100 with 0 at bottom of the graph, then you would
 *       want to pass something like [0,25,50,75,100].
 *
 * @param {Array.<string>} labels The Right Labels for the chart.
*/ goog.ui.ServerChart.prototype.setRightLabels = function(labels) { this.rightLabels_ = labels; this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.RIGHT_LABELS, this.rightLabels_.reverse().join('|')); }; /** * @return {Array.<string>} The Right Labels for the chart. */ goog.ui.ServerChart.prototype.getRightLabels = function() { return this.rightLabels_; }; /** * Sets the position relative to the chart where the legend is to be displayed. * * @param {goog.ui.ServerChart.LegendPosition} value Legend position. */ goog.ui.ServerChart.prototype.setLegendPosition = function(value) { this.uri_.setParameterValue( goog.ui.ServerChart.UriParam.LEGEND_POSITION, value); }; /** * Returns the position relative to the chart where the legend is to be * displayed. * * @return {goog.ui.ServerChart.LegendPosition} Legend position. */ goog.ui.ServerChart.prototype.getLegendPosition = function() { return /** @type {goog.ui.ServerChart.LegendPosition} */ ( this.uri_.getParameterValue( goog.ui.ServerChart.UriParam.LEGEND_POSITION)); }; /** * Sets the number of "visible" data sets. All data sets that come after * the visible data set are not drawn as part of the chart. Instead, they * are available for positioning markers. * @param {?number} n The number of visible data sets, or null if all data * sets are to be visible. */ goog.ui.ServerChart.prototype.setNumVisibleDataSets = function(n) { this.numVisibleDataSets_ = n; }; /** * Returns the number of "visible" data sets. All data sets that come after * the visible data set are not drawn as part of the chart. Instead, they * are available for positioning markers. * * @return {?number} The number of visible data sets, or null if all data * sets are visible. */ goog.ui.ServerChart.prototype.getNumVisibleDataSets = function() { return this.numVisibleDataSets_; }; /** * Sets the weight function for a Venn Diagram along with the associated * colors and legend text. Weights are assigned as follows: * weights[0] is relative area of circle A. * weights[1] is relative area of circle B. * weights[2] is relative area of circle C. * weights[3] is relative area of overlap of circles A and B. * weights[4] is relative area of overlap of circles A and C. * weights[5] is relative area of overlap of circles B and C. * weights[6] is relative area of overlap of circles A, B and C. * For a two circle Venn Diagram the weights are assigned as follows: * weights[0] is relative area of circle A. * weights[1] is relative area of circle B. * weights[2] is relative area of overlap of circles A and B. * * @param {Array.<number>} weights The relative weights of the circles. * @param {Array.<string>=} opt_legendText The legend labels for the circles. * @param {Array.<string>=} opt_colors The colors for the circles. */ goog.ui.ServerChart.prototype.setVennSeries = function( weights, opt_legendText, opt_colors) { if (this.getType() != goog.ui.ServerChart.ChartType.VENN) { throw Error('Can only set a weight function for a Venn diagram.'); } var dataMin = this.arrayMin_(weights); if (dataMin < this.minValue_) { this.minValue_ = dataMin; } var dataMax = this.arrayMax_(weights); if (dataMax > this.maxValue_) { this.maxValue_ = dataMax; } if (goog.isDef(opt_legendText)) { goog.array.forEach( opt_legendText, goog.bind(function(legend) { this.setLegendTexts_.push(legend); }, this)); this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.LEGEND_TEXTS, this.setLegendTexts_.join('|')); } // If the caller only gave three weights, then they wanted a two circle // Venn Diagram. 
Create a 3 circle weight function where circle C has // area zero. if (weights.length == 3) { weights[3] = weights[2]; weights[2] = 0.0; } this.dataSets_.push(weights); if (goog.isDef(opt_colors)) { goog.array.forEach(opt_colors, goog.bind(function(color) { this.setColors_.push(color); }, this)); this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.DATA_COLORS, this.setColors_.join(',')); } }; /** * Sets the title of the chart. * * @param {string} title The chart title. */ goog.ui.ServerChart.prototype.setTitle = function(title) { this.title_ = title; this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.TITLE, this.title_.replace(/\n/g, '|')); }; /** * Sets the size of the chart title. * * @param {number} size The title size, in points. */ goog.ui.ServerChart.prototype.setTitleSize = function(size) { this.titleSize_ = size; this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.TITLE_FORMAT, this.titleColor_ + ',' + this.titleSize_); }; /** * @return {number} size The title size, in points. */ goog.ui.ServerChart.prototype.getTitleSize = function() { return this.titleSize_; }; /** * Sets the color of the chart title. * * NOTE: The color string should NOT have a '#' at the beginning of it. * * @param {string} color The hex value for the title color. */ goog.ui.ServerChart.prototype.setTitleColor = function(color) { this.titleColor_ = color; this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.TITLE_FORMAT, this.titleColor_ + ',' + this.titleSize_); }; /** * @return {string} color The hex value for the title color. */ goog.ui.ServerChart.prototype.getTitleColor = function() { return this.titleColor_; }; /** * Adds a legend to the chart. * * @param {Array.<string>} legend The legend to add. */ goog.ui.ServerChart.prototype.setLegend = function(legend) { this.legend_ = legend; this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.LEGEND, this.legend_.join('|')); }; /** * Sets the data scaling. * NOTE: This also changes the encoding type because data scaling will * only work with {@code goog.ui.ServerChart.EncodingType.TEXT} * encoding. * @param {number} minimum The lowest number to apply to the data. * @param {number} maximum The highest number to apply to the data. */ goog.ui.ServerChart.prototype.setDataScaling = function(minimum, maximum) { this.encodingType_ = goog.ui.ServerChart.EncodingType.TEXT; this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.DATA_SCALING, minimum + ',' + maximum); }; /** * Sets the widths of the bars and the spaces between the bars in a bar * chart. * NOTE: If the space between groups is specified but the space between * bars is left undefined, the space between groups will be interpreted * as the space between bars because this is the behavior exposed * in the external developers guide. * @param {number} barWidth The width of a bar in pixels. * @param {number=} opt_spaceBars The width of the space between * bars in a group in pixels. * @param {number=} opt_spaceGroups The width of the space between * groups. */ goog.ui.ServerChart.prototype.setBarSpaceWidths = function(barWidth, opt_spaceBars, opt_spaceGroups) { var widths = [barWidth]; if (goog.isDef(opt_spaceBars)) { widths.push(opt_spaceBars); } if (goog.isDef(opt_spaceGroups)) { widths.push(opt_spaceGroups); } this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.BAR_HEIGHT, widths.join(',')); }; /** * Specifies that the bar width in a bar chart should be calculated * automatically given the space available in the chart, while optionally * setting the spaces between the bars. 
* NOTE: If the space between groups is specified but the space between * bars is left undefined, the space between groups will be interpreted * as the space between bars because this is the behavior exposed * in the external developers guide. * @param {number=} opt_spaceBars The width of the space between * bars in a group in pixels. * @param {number=} opt_spaceGroups The width of the space between * groups. */ goog.ui.ServerChart.prototype.setAutomaticBarWidth = function(opt_spaceBars, opt_spaceGroups) { var widths = ['a']; if (goog.isDef(opt_spaceBars)) { widths.push(opt_spaceBars); } if (goog.isDef(opt_spaceGroups)) { widths.push(opt_spaceGroups); } this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.BAR_HEIGHT, widths.join(',')); }; /** * Adds a multi-axis to the chart, and sets its type. Multiple axes of the same * type can be added. * * @param {goog.ui.ServerChart.MultiAxisType} axisType The desired axis type. * @return {number} The index of the newly inserted axis, suitable for feeding * to the setMultiAxis*() functions. */ goog.ui.ServerChart.prototype.addMultiAxis = function(axisType) { this.multiAxisType_.push(axisType); this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.MULTI_AXIS_TYPES, this.multiAxisType_.join(',')); return this.multiAxisType_.length - 1; }; /** * Returns the axis type for the given axis, or all of them in an array if the * axis number is not given. * * @param {number=} opt_axisNumber The axis index, as returned by addMultiAxis. * @return {goog.ui.ServerChart.MultiAxisType| * Array.<goog.ui.ServerChart.MultiAxisType>} * The axis type for the given axis, or all of them in an array if the * axis number is not given. */ goog.ui.ServerChart.prototype.getMultiAxisType = function(opt_axisNumber) { if (goog.isDef(opt_axisNumber)) { return this.multiAxisType_[opt_axisNumber]; } return this.multiAxisType_; }; /** * Sets the label text (usually multiple values) for a given axis, overwriting * any existing values. * * @param {number} axisNumber The axis index, as returned by addMultiAxis. * @param {Array.<string>} labelText The actual label text to be added. */ goog.ui.ServerChart.prototype.setMultiAxisLabelText = function(axisNumber, labelText) { this.multiAxisLabelText_[axisNumber] = labelText; var axisString = this.computeMultiAxisDataString_(this.multiAxisLabelText_, ':|', '|', '|'); this.uri_.setParameterValue( goog.ui.ServerChart.UriParam.MULTI_AXIS_LABEL_TEXT, axisString); }; /** * Returns the label text, or all of them in a two-dimensional array if the * axis number is not given. * * @param {number=} opt_axisNumber The axis index, as returned by addMultiAxis. * @return {Object|Array.<string>} The label text, or all of them in a * two-dimensional array if the axis number is not given. */ goog.ui.ServerChart.prototype.getMultiAxisLabelText = function(opt_axisNumber) { if (goog.isDef(opt_axisNumber)) { return this.multiAxisLabelText_[opt_axisNumber]; } return this.multiAxisLabelText_; }; /** * Sets the label positions for a given axis, overwriting any existing values. * The label positions are assumed to be floating-point numbers within the * range of the axis. * * @param {number} axisNumber The axis index, as returned by addMultiAxis. * @param {Array.<number>} labelPosition The actual label positions to be added. 
*/ goog.ui.ServerChart.prototype.setMultiAxisLabelPosition = function( axisNumber, labelPosition) { this.multiAxisLabelPosition_[axisNumber] = labelPosition; var positionString = this.computeMultiAxisDataString_( this.multiAxisLabelPosition_, ',', ',', '|'); this.uri_.setParameterValue( goog.ui.ServerChart.UriParam.MULTI_AXIS_LABEL_POSITION, positionString); }; /** * Returns the label positions for a given axis number, or all of them in a * two-dimensional array if the axis number is not given. * * @param {number=} opt_axisNumber The axis index, as returned by addMultiAxis. * @return {Object|Array.<number>} The label positions for a given axis number, * or all of them in a two-dimensional array if the axis number is not * given. */ goog.ui.ServerChart.prototype.getMultiAxisLabelPosition = function(opt_axisNumber) { if (goog.isDef(opt_axisNumber)) { return this.multiAxisLabelPosition_[opt_axisNumber]; } return this.multiAxisLabelPosition_; }; /** * Sets the label range for a given axis, overwriting any existing range. * The default range is from 0 to 100. If the start value is larger than the * end value, the axis direction is reversed. rangeStart and rangeEnd must * be two different finite numbers. * * @param {number} axisNumber The axis index, as returned by addMultiAxis. * @param {number} rangeStart The new start of the range. * @param {number} rangeEnd The new end of the range. * @param {number=} opt_interval The interval between axis labels. */ goog.ui.ServerChart.prototype.setMultiAxisRange = function(axisNumber, rangeStart, rangeEnd, opt_interval) { goog.asserts.assert(rangeStart != rangeEnd, 'Range start and end cannot be the same value.'); goog.asserts.assert(isFinite(rangeStart) && isFinite(rangeEnd), 'Range start and end must be finite numbers.'); this.multiAxisRange_[axisNumber] = [rangeStart, rangeEnd]; if (goog.isDef(opt_interval)) { this.multiAxisRange_[axisNumber].push(opt_interval); } var rangeString = this.computeMultiAxisDataString_(this.multiAxisRange_, ',', ',', '|'); this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.MULTI_AXIS_RANGE, rangeString); }; /** * Returns the label range for a given axis number as a two-element array of * (range start, range end), or all of them in a two-dimensional array if the * axis number is not given. * * @param {number=} opt_axisNumber The axis index, as returned by addMultiAxis. * @return {Object|Array.<number>} The label range for a given axis number as a * two-element array of (range start, range end), or all of them in a * two-dimensional array if the axis number is not given. */ goog.ui.ServerChart.prototype.getMultiAxisRange = function(opt_axisNumber) { if (goog.isDef(opt_axisNumber)) { return this.multiAxisRange_[opt_axisNumber]; } return this.multiAxisRange_; }; /** * Sets the label style for a given axis, overwriting any existing style. * The default style is as follows: Default is x-axis labels are centered, left * hand y-axis labels are right aligned, right hand y-axis labels are left * aligned. The font size and alignment are optional parameters. * * NOTE: The color string should NOT have a '#' at the beginning of it. * * @param {number} axisNumber The axis index, as returned by addMultiAxis. * @param {string} color The hex value for this label's color. * @param {number=} opt_fontSize The label font size, in pixels. * @param {goog.ui.ServerChart.MultiAxisAlignment=} opt_alignment The label * alignment. * @param {goog.ui.ServerChart.AxisDisplayType=} opt_axisDisplay The axis * line and ticks. 
*/ goog.ui.ServerChart.prototype.setMultiAxisLabelStyle = function(axisNumber, color, opt_fontSize, opt_alignment, opt_axisDisplay) { var style = [color]; if (goog.isDef(opt_fontSize) || goog.isDef(opt_alignment)) { style.push(opt_fontSize || ''); } if (goog.isDef(opt_alignment)) { style.push(opt_alignment); } if (opt_axisDisplay) { style.push(opt_axisDisplay); } this.multiAxisLabelStyle_[axisNumber] = style; var styleString = this.computeMultiAxisDataString_(this.multiAxisLabelStyle_, ',', ',', '|'); this.uri_.setParameterValue( goog.ui.ServerChart.UriParam.MULTI_AXIS_STYLE, styleString); }; /** * Returns the label style for a given axis number as a one- to three-element * array, or all of them in a two-dimensional array if the axis number is not * given. * * @param {number=} opt_axisNumber The axis index, as returned by addMultiAxis. * @return {Object|Array.<number>} The label style for a given axis number as a * one- to three-element array, or all of them in a two-dimensional array if * the axis number is not given. */ goog.ui.ServerChart.prototype.getMultiAxisLabelStyle = function(opt_axisNumber) { if (goog.isDef(opt_axisNumber)) { return this.multiAxisLabelStyle_[opt_axisNumber]; } return this.multiAxisLabelStyle_; }; /** * Adds a data set. * NOTE: The color string should NOT have a '#' at the beginning of it. * * @param {Array.<number|null>} data An array of numbers (values can be * NaN or null). * @param {string} color The hex value for this data set's color. * @param {string=} opt_legendText The legend text, if any, for this data * series. NOTE: If specified, all previously added data sets must also * have a legend text. */ goog.ui.ServerChart.prototype.addDataSet = function(data, color, opt_legendText) { var dataMin = this.arrayMin_(data); if (dataMin < this.minValue_) { this.minValue_ = dataMin; } var dataMax = this.arrayMax_(data); if (dataMax > this.maxValue_) { this.maxValue_ = dataMax; } if (goog.isDef(opt_legendText)) { if (this.setLegendTexts_.length < this.dataSets_.length) { throw Error('Cannot start adding legends text after first element.'); } this.setLegendTexts_.push(opt_legendText); this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.LEGEND_TEXTS, this.setLegendTexts_.join('|')); } this.dataSets_.push(data); this.setColors_.push(color); this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.DATA_COLORS, this.setColors_.join(',')); }; /** * Clears the data sets from the graph. All data, including the colors and * legend text, is cleared. */ goog.ui.ServerChart.prototype.clearDataSets = function() { var queryData = this.uri_.getQueryData(); queryData.remove(goog.ui.ServerChart.UriParam.LEGEND_TEXTS); queryData.remove(goog.ui.ServerChart.UriParam.DATA_COLORS); queryData.remove(goog.ui.ServerChart.UriParam.DATA); this.setLegendTexts_.length = 0; this.setColors_.length = 0; this.dataSets_.length = 0; }; /** * Returns the given data set or all of them in a two-dimensional array if * the set number is not given. * * @param {number=} opt_setNumber Optional data set number to get. * @return {Array} The given data set or all of them in a two-dimensional array * if the set number is not given. */ goog.ui.ServerChart.prototype.getData = function(opt_setNumber) { if (goog.isDef(opt_setNumber)) { return this.dataSets_[opt_setNumber]; } return this.dataSets_; }; /** * Computes the data string using the data in this.dataSets_ and sets * the object's URI accordingly. 
If the URI's length equals or exceeds the * limit, goog.ui.ServerChart.UriTooLongEvent is dispatched on the * goog.ui.ServerChart object. * @private */ goog.ui.ServerChart.prototype.computeDataString_ = function() { var ok; if (this.encodingType_ != goog.ui.ServerChart.EncodingType.AUTOMATIC) { ok = this.computeDataStringForEncoding_(this.encodingType_); } else { ok = this.computeDataStringForEncoding_( goog.ui.ServerChart.EncodingType.EXTENDED); if (!ok) { ok = this.computeDataStringForEncoding_( goog.ui.ServerChart.EncodingType.SIMPLE); } } if (!ok) { this.dispatchEvent( new goog.ui.ServerChart.UriTooLongEvent(this.uri_.toString())); } }; /** * Computes the data string using the data in this.dataSets_ and the encoding * specified by the encoding parameter, which must not be AUTOMATIC, and sets * the object's URI accordingly. * @param {goog.ui.ServerChart.EncodingType} encoding The data encoding to use; * must not be AUTOMATIC. * @return {boolean} False if the resulting URI is too long. * @private */ goog.ui.ServerChart.prototype.computeDataStringForEncoding_ = function( encoding) { var dataStrings = []; for (var i = 0, setLen = this.dataSets_.length; i < setLen; ++i) { dataStrings[i] = this.getChartServerValues_(this.dataSets_[i], this.minValue_, this.maxValue_, encoding); } var delimiter = encoding == goog.ui.ServerChart.EncodingType.TEXT ? '|' : ','; dataStrings = dataStrings.join(delimiter); var data; if (this.numVisibleDataSets_ == null) { data = goog.string.buildString(encoding, ':', dataStrings); } else { data = goog.string.buildString(encoding, this.numVisibleDataSets_, ':', dataStrings); } this.uri_.setParameterValue(goog.ui.ServerChart.UriParam.DATA, data); return this.uri_.toString().length < this.uriLengthLimit_; }; /** * Computes a multi-axis data string from the given data and separators. The * general data format for each index/element in the array will be * "<arrayIndex><indexSeparator><arrayElement.join(elementSeparator)>", with * axisSeparator used between multiple elements. * @param {Object} data The data to compute the data string for, as a * sparse array of arrays. NOTE: The function uses the length of * multiAxisType_ to determine the upper bound for the outer array. * @param {string} indexSeparator The separator string inserted between each * index and the data itself, commonly a comma (,). * @param {string} elementSeparator The separator string inserted between each * element inside each sub-array in the data, if there are more than one; * commonly a comma (,). * @param {string} axisSeparator The separator string inserted between each * axis specification, if there are more than one; usually a pipe sign (|). * @return {string} The multi-axis data string. 
* @private */ goog.ui.ServerChart.prototype.computeMultiAxisDataString_ = function( data, indexSeparator, elementSeparator, axisSeparator) { var elementStrings = []; for (var i = 0, setLen = this.multiAxisType_.length; i < setLen; ++i) { if (data[i]) { elementStrings.push(i + indexSeparator + data[i].join(elementSeparator)); } } return elementStrings.join(axisSeparator); }; /** * Array of possible ChartServer data values * @type {string} */ goog.ui.ServerChart.CHART_VALUES = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + 'abcdefghijklmnopqrstuvwxyz' + '0123456789'; /** * Array of extended ChartServer data values * @type {string} */ goog.ui.ServerChart.CHART_VALUES_EXTENDED = goog.ui.ServerChart.CHART_VALUES + '-.'; /** * Upper bound for extended values */ goog.ui.ServerChart.EXTENDED_UPPER_BOUND = Math.pow(goog.ui.ServerChart.CHART_VALUES_EXTENDED.length, 2) - 1; /** * Converts a single number to an encoded data value suitable for ChartServer. * The TEXT encoding is the number in decimal; the SIMPLE encoding is a single * character, and the EXTENDED encoding is two characters. See * http://code.google.com/apis/chart/docs/data_formats.html for the detailed * specification of these encoding formats. * * @private * @param {?number} value The value to convert (null for a missing data point). * @param {number} minValue The minimum value (used for normalization). * @param {number} maxValue The maximum value (used for normalization). * @param {goog.ui.ServerChart.EncodingType} encoding The data encoding to use; * must not be AUTOMATIC. * @return {string} The encoded data value. */ goog.ui.ServerChart.prototype.getConvertedValue_ = function(value, minValue, maxValue, encoding) { goog.asserts.assert(minValue <= maxValue, 'minValue should be less than or equal to maxValue'); var isExtended = (encoding == goog.ui.ServerChart.EncodingType.EXTENDED); if (goog.isNull(value) || !goog.isDef(value) || isNaN(value) || value < minValue || value > maxValue) { return isExtended ? '__' : '_'; } if (encoding == goog.ui.ServerChart.EncodingType.TEXT) { return String(value); } var frac = goog.ui.ServerChart.DEFAULT_NORMALIZATION; if (maxValue > minValue) { frac = (value - minValue) / (maxValue - minValue); // Previous checks of value ensure that 0 <= frac <= 1 at this point. } if (isExtended) { var maxIndex = goog.ui.ServerChart.CHART_VALUES_EXTENDED.length; var upperBound = goog.ui.ServerChart.EXTENDED_UPPER_BOUND; var index1 = Math.floor(frac * upperBound / maxIndex); var index2 = Math.floor((frac * upperBound) % maxIndex); var extendedVals = goog.ui.ServerChart.CHART_VALUES_EXTENDED; return extendedVals.charAt(index1) + extendedVals.charAt(index2); } var index = Math.round(frac * (goog.ui.ServerChart.CHART_VALUES.length - 1)); return goog.ui.ServerChart.CHART_VALUES.charAt(index); }; /** * Creates the chd string for chartserver. * * @private * @param {Array.<number>} values An array of numbers to graph. * @param {number} minValue The minimum value (used for normalization). * @param {number} maxValue The maximum value (used for normalization). * @param {goog.ui.ServerChart.EncodingType} encoding The data encoding to use; * must not be AUTOMATIC. * @return {string} The chd string for chartserver. */ goog.ui.ServerChart.prototype.getChartServerValues_ = function(values, minValue, maxValue, encoding) { var s = []; for (var i = 0, valuesLen = values.length; i < valuesLen; ++i) { s.push(this.getConvertedValue_(values[i], minValue, maxValue, encoding)); } return s.join( this.encodingType_ == goog.ui.ServerChart.EncodingType.TEXT ? 
',' : ''); }; /** * Finds the minimum value in an array and returns it. * Needed because Math.min does not handle sparse arrays the way we want. * * @param {Array.<number?>} ary An array of values. * @return {number} The minimum value. * @private */ goog.ui.ServerChart.prototype.arrayMin_ = function(ary) { var min = Infinity; for (var i = 0, aryLen = ary.length; i < aryLen; ++i) { var value = ary[i]; if (value != null && value < min) { min = value; } } return min; }; /** * Finds the maximum value in an array and returns it. * Needed because Math.max does not handle sparse arrays the way we want. * * @param {Array.<number?>} ary An array of values. * @return {number} The maximum value. * @private */ goog.ui.ServerChart.prototype.arrayMax_ = function(ary) { var max = -Infinity; for (var i = 0, aryLen = ary.length; i < aryLen; ++i) { var value = ary[i]; if (value != null && value > max) { max = value; } } return max; }; /** @override */ goog.ui.ServerChart.prototype.disposeInternal = function() { goog.ui.ServerChart.superClass_.disposeInternal.call(this); delete this.xLabels_; delete this.leftLabels_; delete this.rightLabels_; delete this.gridX_; delete this.gridY_; delete this.setColors_; delete this.setLegendTexts_; delete this.dataSets_; this.uri_ = null; delete this.minValue_; delete this.maxValue_; this.title_ = null; delete this.multiAxisType_; delete this.multiAxisLabelText_; delete this.multiAxisLabelPosition_; delete this.multiAxisRange_; delete this.multiAxisLabelStyle_; this.legend_ = null; }; /** * Event types dispatched by the ServerChart object * @enum {string} */ goog.ui.ServerChart.Event = { /** * Dispatched when the resulting URI reaches or exceeds the URI length limit. */ URI_TOO_LONG: 'uritoolong' }; /** * Class for the event dispatched on the ServerChart when the resulting URI * exceeds the URI length limit. * @constructor * @param {string} uri The overly-long URI string. * @extends {goog.events.Event} */ goog.ui.ServerChart.UriTooLongEvent = function(uri) { goog.events.Event.call(this, goog.ui.ServerChart.Event.URI_TOO_LONG); /** * The overly-long URI string. * @type {string} */ this.uri = uri; }; goog.inherits(goog.ui.ServerChart.UriTooLongEvent, goog.events.Event);<|fim▁end|>
'img', {'src': this.getUri(), 'class': goog.getCssName('goog-serverchart-image'),
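The getConvertedValue_ method in the serverchart.js row above normalizes each data point into a fraction of the chart's range and maps it onto a symbol alphabet. A rough Python sketch of just the SIMPLE encoding path, as an illustration rather than the chart server's own code:

# Sketch of SIMPLE encoding: one symbol per data point, '_' for missing,
# NaN, or out-of-range values, mirroring the getConvertedValue_ logic above.
CHART_VALUES = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                'abcdefghijklmnopqrstuvwxyz'
                '0123456789')

def simple_encode(value, min_value, max_value):
    if value is None or value != value or not (min_value <= value <= max_value):
        return '_'        # missing data point (value != value catches NaN)
    frac = 0.5            # DEFAULT_NORMALIZATION when the range is flat
    if max_value > min_value:
        frac = (value - min_value) / (max_value - min_value)
    return CHART_VALUES[round(frac * (len(CHART_VALUES) - 1))]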
<|file_name|>notifier.py<|end_file_name|><|fim▁begin|># ----------------------------------------------------------------------
# Copyright (c) 2014 Rafael Gonzalez.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ----------------------------------------------------------------------

# ========================== DESIGN NOTES ==============================
#
# The EMA hardware is quite smart. It does many things on its own:
# if any of its various thresholds is reached, it opens or closes relays.
# Threshold calculations are done inside the EMA.
# But there may be other actions that could be triggered
# when the roof or aux relay changes state, like sending an SMS.
#
# Also, in my current setup, the battery voltage should be carefully
# controlled. If voltage falls below a threshold, we should start
# switching off all devices except EMA itself.
#
# This module is the real added value of this EMA Python server
# to this excellent hardware.
#
# It allows you to trigger scripts that do interesting things like
# sending an SMS or switching off its own computer (a Raspberry Pi).
#
# Scripts can be written in any language you like, of course. This
# project includes scripts to send SMS using the python-gammu
# binding to the gammu project.
#
# I have preferred to trigger arbitrary scripts rather than a tight
# integration with this EMA server. Loosely coupled modules evolve better
# over time.
#
# Scripts are forked in the background and can be triggered each time
# the event takes place or just once. There will be only one script
# process running. If a recurring event takes place and the script is
# still active, the new script is not launched.
#
# Notifier Class responsibilities:
# 1) capture Voltage Low, Roof Relay on/off
#    and Aux Relay off/on events
# 2) Hold Script objects to run
# 3) Run them when the event comes.
#
# Script Class responsibilities:
# 1) Hold a path to the external script file
# 2) Know what its execution mode is (run once, run many times)
# 3) Fork the script in the background and not wait for its completion
#
# We use an exception to signal the notifier about a process already being
# executed. I think this is cleaner than carrying return information
# across two levels.
#
# In V2.0, notifier is generic, allowing registering and execution of any
# event
# ======================================================================

import logging
import subprocess
import os

log = logging.getLogger('notifier')


class ExecutedScript(Exception):
    '''Signals a script has executed'''
    def __init__(self, name, *args):
        self.name = name
        self.args = args
    def __str__(self):
        '''Prints useful information'''
        tmp = ''
        for arg in self.args:
            tmp += ' ' + arg
        return self.name + ' ' + tmp


class Script(object):
    '''Notifier creates Script wrapper objects, representing scripts to be launched'''

    # modes as constants
    NEVER = 0
    ONCE = 1
    MANY = 2

    # mapping from strings to numbers
    MODES = { 'Never' : NEVER, 'Once' : ONCE, 'Many' : MANY }

    def __init__(self, cfg):
        self.mode = Script.MODES[cfg[1]]
        self.path = cfg[0]
        self.name = os.path.basename(self.path)
        self.child = None
        self.executed = False

    def runOnce(self, *args):
        '''run only once in the whole server lifetime'''
        # skip if already run
        # otherwise, spawn it
        if self.executed:
            return False
        # If not running, spawn it
        try:
            self.child = subprocess.Popen((self.path,) + args)
        except (OSError, ValueError) as e:
            log.error("runOnce(%s): %s", self.path, e)
        else:
            self.executed = True
            raise ExecutedScript(self.name, *args)

    def runMany(self, *args):
        '''Run one more time, if previous run completed'''
        # Check existing script already running
        # If running we don't go any further and return.
        # otherwise, spawn it.
        if self.child:
            self.child.poll()
            if self.child.returncode is None:
                log.warning("script %s has not finished. Can't launch it again", self.name)
                return
        try:
            self.child = subprocess.Popen((self.path,) + args)
        except (OSError, ValueError) as e:
            log.error("runMany(%s): %s", self.path, e)
        else:
            raise ExecutedScript(self.name, *args)
        return
<|fim▁hole|>
    def run(self, *args):
        '''Launch a script, depending on the launch mode'''
        # Skip if no script is configured
        if not self.path:
            return
        if self.mode == Script.ONCE:
            self.runOnce(*args)
        elif self.mode == Script.MANY:
            self.runMany(*args)


class Notifier(object):
    '''Notifies EMA events to third parties by executing scripts'''

    # Modes as a set of text strings to be used in the config file
    MODES = {'Never', 'Once', 'Many'}

    def __init__(self):
        self.scripts = {}

    # ---------------------------
    # Adding scripts to notifier
    # ---------------------------

    def addScript(self, event, mode, path):
        '''Register a Script (path, mode) to run when the given event fires'''
        aList = self.scripts.get(event, [])
        aList.append(Script((path, mode)))
        self.scripts[event] = aList

    # ---------------------------
    # Event handler from Devices
    # ---------------------------

    def onEventExecute(self, event, *args):
        try:
            for script in self.scripts[event]:
                script.run(*args)
        except ExecutedScript as e:
            log.warning("On event %s executed script => %s", event, e)<|fim▁end|>
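A short usage sketch for the Script/Notifier API in the row above; the event name and script path are invented for illustration and are not part of the dataset:

# Hypothetical wiring: run an SMS script once when the roof relay switches.
notifier = Notifier()
notifier.addScript('roofRelaySwitch', 'Once', '/usr/local/bin/send_sms.sh')
# Later, from the device layer, when the event fires:
notifier.onEventExecute('roofRelaySwitch', 'ON')  # logs when the script runs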
<|file_name|>vocab_utils.py<|end_file_name|><|fim▁begin|># Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility to handle vocabularies.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import codecs import os import tensorflow.compat.v1 as tf from REDACTED.tensorflow.python.ops import lookup_ops from REDACTED.nmt.utils import misc_utils as utils # word level special token UNK = "<unk>" SOS = "<s>" EOS = "</s>" UNK_ID = 0 # char ids 0-255 come from utf-8 encoding bytes # assign 256-300 to special chars BOS_CHAR_ID = 256 # <begin sentence> EOS_CHAR_ID = 257 # <end sentence> BOW_CHAR_ID = 258 # <begin word> EOW_CHAR_ID = 259 # <end word> PAD_CHAR_ID = 260 # <padding> DEFAULT_CHAR_MAXLEN = 50 # max number of chars for each word. def _string_to_bytes(text, max_length): """Given string and length, convert to byte seq of at most max_length. This process mimics docqa/elmo's preprocessing: https://github.com/allenai/document-qa/blob/master/docqa/elmo/data.py Note that we make use of BOS_CHAR_ID and EOS_CHAR_ID in iterator_utils.py & our usage differs from docqa/elmo. Args: text: tf.string tensor of shape [] max_length: max number of chars for each word. Returns: A tf.int32 tensor of the byte encoded text. """ byte_ids = tf.to_int32(tf.decode_raw(text, tf.uint8)) byte_ids = byte_ids[:max_length - 2] padding = tf.fill([max_length - tf.shape(byte_ids)[0] - 2], PAD_CHAR_ID) byte_ids = tf.concat( [[BOW_CHAR_ID], byte_ids, [EOW_CHAR_ID], padding], axis=0) tf.logging.info(byte_ids) byte_ids = tf.reshape(byte_ids, [max_length]) tf.logging.info(byte_ids.get_shape().as_list()) return byte_ids + 1 def tokens_to_bytes(tokens): """Given a sequence of strings, map to sequence of bytes. Args: tokens: A tf.string tensor Returns: A tensor of shape words.shape + [bytes_per_word] containing byte versions of each word. 
""" bytes_per_word = DEFAULT_CHAR_MAXLEN with tf.device("/cpu:0"): tf.assert_rank(tokens, 1) shape = tf.shape(tokens) tf.logging.info(tokens) tokens_flat = tf.reshape(tokens, [-1]) as_bytes_flat = tf.map_fn( fn=lambda x: _string_to_bytes(x, max_length=bytes_per_word), elems=tokens_flat, dtype=tf.int32, back_prop=False) tf.logging.info(as_bytes_flat) as_bytes = tf.reshape(as_bytes_flat, [shape[0], bytes_per_word]) return as_bytes def load_vocab(vocab_file): vocab = [] with codecs.getreader("utf-8")(tf.gfile.GFile(vocab_file, "rb")) as f: vocab_size = 0 for word in f: vocab_size += 1 vocab.append(word.strip()) return vocab, vocab_size def check_vocab(vocab_file, out_dir, check_special_token=True, sos=None, eos=None, unk=None): """Check if vocab_file doesn't exist, create from corpus_file.""" if tf.gfile.Exists(vocab_file): utils.print_out("# Vocab file %s exists" % vocab_file) vocab, vocab_size = load_vocab(vocab_file) if check_special_token: # Verify if the vocab starts with unk, sos, eos # If not, prepend those tokens & generate a new vocab file if not unk: unk = UNK if not sos: sos = SOS if not eos: eos = EOS assert len(vocab) >= 3 if vocab[0] != unk or vocab[1] != sos or vocab[2] != eos: utils.print_out("The first 3 vocab words [%s, %s, %s]" " are not [%s, %s, %s]" % (vocab[0], vocab[1], vocab[2], unk, sos, eos)) vocab = [unk, sos, eos] + vocab vocab_size += 3 new_vocab_file = os.path.join(out_dir, os.path.basename(vocab_file)) with codecs.getwriter("utf-8")( tf.gfile.GFile(new_vocab_file, "wb")) as f: for word in vocab: f.write("%s\n" % word) vocab_file = new_vocab_file else: raise ValueError("vocab_file '%s' does not exist." % vocab_file) vocab_size = len(vocab) return vocab_size, vocab_file def create_vocab_tables(src_vocab_file): """Creates vocab tables for src_vocab_file and tgt_vocab_file.""" src_vocab_table = lookup_ops.index_table_from_file( src_vocab_file, default_value=UNK_ID) tgt_vocab_table = src_vocab_table return src_vocab_table, tgt_vocab_table def load_embed_txt(embed_file): """Load embed_file into a python dictionary. Note: the embed_file should be a Glove/word2vec formatted txt file. Assuming Here is an exampe assuming embed_size=5: the -0.071549 0.093459 0.023738 -0.090339 0.056123 to 0.57346 0.5417 -0.23477 -0.3624 0.4037 and 0.20327 0.47348 0.050877 0.002103 0.060547 For word2vec format, the first line will be: <num_words> <emb_size>. Args: embed_file: file path to the embedding file. Returns: a dictionary that maps word to vector, and the size of embedding dimensions. """ emb_dict = dict() emb_size = None is_first_line = True with codecs.getreader("utf-8")(tf.gfile.GFile(embed_file, "rb")) as f: for line in f: tokens = line.rstrip().split(" ") if is_first_line: is_first_line = False if len(tokens) == 2: # header line emb_size = int(tokens[1]) continue word = tokens[0] vec = list(map(float, tokens[1:])) emb_dict[word] = vec if emb_size:<|fim▁hole|> else: emb_size = len(vec) return emb_dict, emb_size<|fim▁end|>
        if emb_size != len(vec):
          utils.print_out(
              "Ignoring %s since embedding size is inconsistent." % word)
          del emb_dict[word]
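# ----------------------------------------------------------------------
# Hedged pure-Python illustration (not part of the module above) of the
# byte scheme _string_to_bytes() implements: bracket the UTF-8 bytes of a
# word with the BOW/EOW markers, pad with PAD_CHAR_ID, then shift every
# id by +1 so that 0 remains free for sequence padding.
# ----------------------------------------------------------------------
def string_to_bytes_py(text, max_length=50):
    byte_ids = list(bytearray(text.encode("utf-8")))[:max_length - 2]
    padding = [260] * (max_length - len(byte_ids) - 2)   # PAD_CHAR_ID
    ids = [258] + byte_ids + [259] + padding             # BOW/EOW markers
    return [i + 1 for i in ids]

# "cat" encodes to [BOW+1, 100, 98, 117, EOW+1, PAD+1, ...]:
assert string_to_bytes_py("cat")[:5] == [259, 100, 98, 117, 260]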
<|file_name|>joint.shapes.fsa.js<|end_file_name|><|fim▁begin|>/*! JointJS v3.4.1 (2021-08-18) - JavaScript diagramming library This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. */ this.joint = this.joint || {}; this.joint.shapes = this.joint.shapes || {}; (function (exports, basic_mjs, Element_mjs, Link_mjs) { 'use strict'; var State = basic_mjs.Circle.define('fsa.State', { attrs: { circle: { 'stroke-width': 3 }, text: { 'font-weight': '800' } } }); var StartState = Element_mjs.Element.define('fsa.StartState', { size: { width: 20, height: 20 }, attrs: { circle: { transform: 'translate(10, 10)', r: 10, fill: '#000000' } } }, { markup: '<g class="rotatable"><g class="scalable"><circle/></g></g>', }); var EndState = Element_mjs.Element.define('fsa.EndState', { size: { width: 20, height: 20 }, attrs: { '.outer': { transform: 'translate(10, 10)',<|fim▁hole|> stroke: '#000000' }, '.inner': { transform: 'translate(10, 10)', r: 6, fill: '#000000' } } }, { markup: '<g class="rotatable"><g class="scalable"><circle class="outer"/><circle class="inner"/></g></g>', }); var Arrow = Link_mjs.Link.define('fsa.Arrow', { attrs: { '.marker-target': { d: 'M 10 0 L 0 5 L 10 10 z' }}, smooth: true }); exports.Arrow = Arrow; exports.EndState = EndState; exports.StartState = StartState; exports.State = State; }(this.joint.shapes.fsa = this.joint.shapes.fsa || {}, joint.shapes.basic, joint.dia, joint.dia));<|fim▁end|>
r: 10, fill: '#ffffff',
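// ----------------------------------------------------------------------
// Hedged usage sketch (not part of the bundle above). Assumes a
// joint.dia.Graph named `graph` already exists; positions and labels are
// illustrative only:
//
//   var start = new joint.shapes.fsa.StartState({ position: { x: 40, y: 40 } });
//   var q0 = new joint.shapes.fsa.State({ position: { x: 160, y: 40 },
//                                         attrs: { text: { text: 'q0' } } });
//   var t = new joint.shapes.fsa.Arrow({ source: { id: start.id },
//                                        target: { id: q0.id } });
//   graph.addCells([start, q0, t]);
// ----------------------------------------------------------------------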
<|file_name|>imagebackend.py<|end_file_name|><|fim▁begin|># Copyright 2012 Grid Dynamics # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import base64 import contextlib import functools import os import shutil from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import fileutils from oslo_utils import strutils from oslo_utils import units import six import nova.conf from nova import exception from nova.i18n import _ from nova.i18n import _LE, _LI, _LW from nova import image from nova import keymgr from nova import utils from nova.virt.disk import api as disk from nova.virt.image import model as imgmodel from nova.virt import images from nova.virt.libvirt import config as vconfig from nova.virt.libvirt.storage import dmcrypt from nova.virt.libvirt.storage import lvm from nova.virt.libvirt.storage import rbd_utils from nova.virt.libvirt import utils as libvirt_utils __imagebackend_opts = [ cfg.StrOpt('images_type', default='default', choices=('raw', 'qcow2', 'lvm', 'rbd', 'ploop', 'default'), help='VM Images format. If default is specified, then' ' use_cow_images flag is used instead of this one.'), cfg.StrOpt('images_volume_group', help='LVM Volume Group that is used for VM images, when you' ' specify images_type=lvm.'), cfg.BoolOpt('sparse_logical_volumes', default=False, help='Create sparse logical volumes (with virtualsize)' ' if this flag is set to True.'), cfg.StrOpt('images_rbd_pool', default='rbd', help='The RADOS pool in which rbd volumes are stored'), cfg.StrOpt('images_rbd_ceph_conf', default='', # default determined by librados help='Path to the ceph configuration file to use'), cfg.StrOpt('hw_disk_discard', choices=('ignore', 'unmap'), help='Discard option for nova managed disks. Need' ' Libvirt(1.0.6) Qemu1.5 (raw format) Qemu1.6(qcow2' ' format)'), ] CONF = nova.conf.CONF CONF.register_opts(__imagebackend_opts, 'libvirt') CONF.import_opt('rbd_user', 'nova.virt.libvirt.volume.net', group='libvirt') CONF.import_opt('rbd_secret_uuid', 'nova.virt.libvirt.volume.net', group='libvirt') LOG = logging.getLogger(__name__) IMAGE_API = image.API() @six.add_metaclass(abc.ABCMeta) class Image(object): SUPPORTS_CLONE = False def __init__(self, source_type, driver_format, is_block_dev=False): """Image initialization. :source_type: block or file :driver_format: raw or qcow2 :is_block_dev: """ if (CONF.ephemeral_storage_encryption.enabled and not self._supports_encryption()): raise exception.NovaException(_('Incompatible settings: ' 'ephemeral storage encryption is supported ' 'only for LVM images.')) self.source_type = source_type self.driver_format = driver_format self.driver_io = None self.discard_mode = CONF.libvirt.hw_disk_discard self.is_block_dev = is_block_dev self.preallocate = False # NOTE(dripton): We store lines of json (path, disk_format) in this # file, for some image types, to prevent attacks based on changing the # disk_format. 
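        # As a hedged illustration (the instance path below is
        # hypothetical), one such json line maps an image path to its
        # format:
        #   {"/var/lib/nova/instances/<uuid>/disk": "qcow2"}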
self.disk_info_path = None # NOTE(mikal): We need a lock directory which is shared along with # instance files, to cover the scenario where multiple compute nodes # are trying to create a base file at the same time self.lock_path = os.path.join(CONF.instances_path, 'locks') def _supports_encryption(self): """Used to test that the backend supports encryption. Override in the subclass if backend supports encryption. """ return False @abc.abstractmethod def create_image(self, prepare_template, base, size, *args, **kwargs): """Create image from template. Contains specific behavior for each image type. :prepare_template: function, that creates template. Should accept `target` argument. :base: Template name :size: Size of created image in bytes """ pass @abc.abstractmethod def resize_image(self, size): """Resize image to size (in bytes). :size: Desired size of image in bytes """ pass def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode, extra_specs, hypervisor_version): """Get `LibvirtConfigGuestDisk` filled for this image. :disk_dev: Disk bus device name :disk_bus: Disk bus type :device_type: Device type for this image. :cache_mode: Caching mode for this image :extra_specs: Instance type extra specs dict. :hypervisor_version: the hypervisor version """ info = vconfig.LibvirtConfigGuestDisk() info.source_type = self.source_type info.source_device = device_type info.target_bus = disk_bus info.target_dev = disk_dev info.driver_cache = cache_mode info.driver_discard = self.discard_mode info.driver_io = self.driver_io info.driver_format = self.driver_format driver_name = libvirt_utils.pick_disk_driver_name(hypervisor_version, self.is_block_dev) info.driver_name = driver_name info.source_path = self.path self.disk_qos(info, extra_specs) return info def disk_qos(self, info, extra_specs): tune_items = ['disk_read_bytes_sec', 'disk_read_iops_sec', 'disk_write_bytes_sec', 'disk_write_iops_sec', 'disk_total_bytes_sec', 'disk_total_iops_sec'] for key, value in six.iteritems(extra_specs): scope = key.split(':') if len(scope) > 1 and scope[0] == 'quota': if scope[1] in tune_items: setattr(info, scope[1], value) def libvirt_fs_info(self, target, driver_type=None): """Get `LibvirtConfigGuestFilesys` filled for this image. :target: target directory inside a container. :driver_type: filesystem driver type, can be loop nbd or ploop. """ info = vconfig.LibvirtConfigGuestFilesys() info.target_dir = target if self.is_block_dev: info.source_type = "block" info.source_dev = self.path else: info.source_type = "file" info.source_file = self.path info.driver_format = self.driver_format if driver_type: info.driver_type = driver_type else: if self.driver_format == "raw": info.driver_type = "loop" else: info.driver_type = "nbd" return info def check_image_exists(self): return os.path.exists(self.path) def cache(self, fetch_func, filename, size=None, *args, **kwargs): """Creates image from template. Ensures that template and image not already exists. Ensures that base directory exists. Synchronizes on template fetching. :fetch_func: Function that creates the base image Should accept `target` argument. :filename: Name of the file in the image directory :size: Size of created image in bytes (optional) """ @utils.synchronized(filename, external=True, lock_path=self.lock_path) def fetch_func_sync(target, *args, **kwargs): # The image may have been fetched while a subsequent # call was waiting to obtain the lock. 
if not os.path.exists(target): fetch_func(target=target, *args, **kwargs) base_dir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) if not os.path.exists(base_dir): fileutils.ensure_tree(base_dir) base = os.path.join(base_dir, filename) if not self.check_image_exists() or not os.path.exists(base): self.create_image(fetch_func_sync, base, size, *args, **kwargs) if size: if size > self.get_disk_size(base): self.resize_image(size) if (self.preallocate and self._can_fallocate() and os.access(self.path, os.W_OK)): utils.execute('fallocate', '-n', '-l', size, self.path) def _can_fallocate(self): """Check once per class, whether fallocate(1) is available, and that the instances directory supports fallocate(2). """ can_fallocate = getattr(self.__class__, 'can_fallocate', None) if can_fallocate is None: test_path = self.path + '.fallocate_test' _out, err = utils.trycmd('fallocate', '-l', '1', test_path) fileutils.delete_if_exists(test_path) can_fallocate = not err self.__class__.can_fallocate = can_fallocate if not can_fallocate: LOG.warning(_LW('Unable to preallocate image at path: ' '%(path)s'), {'path': self.path}) return can_fallocate def verify_base_size(self, base, size, base_size=0): """Check that the base image is not larger than size. Since images can't be generally shrunk, enforce this constraint taking account of virtual image size. """ # Note(pbrady): The size and min_disk parameters of a glance # image are checked against the instance size before the image # is even downloaded from glance, but currently min_disk is # adjustable and doesn't currently account for virtual disk size, # so we need this extra check here. # NOTE(cfb): Having a flavor that sets the root size to 0 and having # nova effectively ignore that size and use the size of the # image is considered a feature at this time, not a bug. if size is None: return if size and not base_size: base_size = self.get_disk_size(base) if size < base_size: msg = _LE('%(base)s virtual size %(base_size)s ' 'larger than flavor root disk size %(size)s') LOG.error(msg % {'base': base, 'base_size': base_size, 'size': size}) raise exception.FlavorDiskSmallerThanImage( flavor_size=size, image_size=base_size) def get_disk_size(self, name): return disk.get_disk_size(name) def snapshot_extract(self, target, out_format): raise NotImplementedError() def _get_driver_format(self): return self.driver_format def resolve_driver_format(self): """Return the driver format for self.path. First checks self.disk_info_path for an entry. If it's not there, calls self._get_driver_format(), and then stores the result in self.disk_info_path See https://bugs.launchpad.net/nova/+bug/1221190 """ def _dict_from_line(line): if not line: return {} try: return jsonutils.loads(line) except (TypeError, ValueError) as e: msg = (_("Could not load line %(line)s, got error " "%(error)s") % {'line': line, 'error': e}) raise exception.InvalidDiskInfo(reason=msg) @utils.synchronized(self.disk_info_path, external=False, lock_path=self.lock_path) def write_to_disk_info_file(): # Use os.open to create it without group or world write permission. 
fd = os.open(self.disk_info_path, os.O_RDONLY | os.O_CREAT, 0o644) with os.fdopen(fd, "r") as disk_info_file: line = disk_info_file.read().rstrip() dct = _dict_from_line(line) if self.path in dct: msg = _("Attempted overwrite of an existing value.") raise exception.InvalidDiskInfo(reason=msg) dct.update({self.path: driver_format}) tmp_path = self.disk_info_path + ".tmp" fd = os.open(tmp_path, os.O_WRONLY | os.O_CREAT, 0o644) with os.fdopen(fd, "w") as tmp_file: tmp_file.write('%s\n' % jsonutils.dumps(dct)) os.rename(tmp_path, self.disk_info_path) try: if (self.disk_info_path is not None and os.path.exists(self.disk_info_path)): with open(self.disk_info_path) as disk_info_file: line = disk_info_file.read().rstrip() dct = _dict_from_line(line) for path, driver_format in six.iteritems(dct): if path == self.path: return driver_format driver_format = self._get_driver_format() if self.disk_info_path is not None: fileutils.ensure_tree(os.path.dirname(self.disk_info_path)) write_to_disk_info_file() except OSError as e: raise exception.DiskInfoReadWriteFail(reason=six.text_type(e)) return driver_format @staticmethod def is_shared_block_storage(): """True if the backend puts images on a shared block storage.""" return False @staticmethod def is_file_in_instance_path(): """True if the backend stores images in files under instance path.""" return False def clone(self, context, image_id_or_uri): """Clone an image. Note that clone operation is backend-dependent. The backend may ask the image API for a list of image "locations" and select one or more of those locations to clone an image from. :param image_id_or_uri: The ID or URI of an image to clone. :raises: exception.ImageUnacceptable if it cannot be cloned """ reason = _('clone() is not implemented') raise exception.ImageUnacceptable(image_id=image_id_or_uri, reason=reason) def direct_snapshot(self, context, snapshot_name, image_format, image_id, base_image_id): """Prepare a snapshot for direct reference from glance :raises: exception.ImageUnacceptable if it cannot be referenced directly in the specified image format :returns: URL to be given to glance """ raise NotImplementedError(_('direct_snapshot() is not implemented')) def cleanup_direct_snapshot(self, location, also_destroy_volume=False, ignore_errors=False): """Performs any cleanup actions required after calling direct_snapshot(), for graceful exception handling and the like. This should be a no-op on any backend where it is not implemented. """ pass def _get_lock_name(self, base): """Get an image's name of a base file.""" return os.path.split(base)[-1] def get_model(self, connection): """Get the image information model :returns: an instance of nova.virt.image.model.Image """ raise NotImplementedError() def import_file(self, instance, local_file, remote_name): """Import an image from local storage into this backend. Import a local file into the store used by this image type. Note that this is a noop for stores using local disk (the local file is considered "in the store"). If the image already exists it will be overridden by the new file :param local_file: path to the file to import :param remote_name: the name for the file in the store """ # NOTE(mikal): this is a noop for now for all stores except RBD, but # we should talk about if we want this functionality for everything. pass def create_snap(self, name): """Create a snapshot on the image. A noop on backends that don't support snapshots. 
:param name: name of the snapshot """ pass def remove_snap(self, name, ignore_errors=False): """Remove a snapshot on the image. A noop on backends that don't support snapshots. :param name: name of the snapshot :param ignore_errors: don't log errors if the snapshot does not exist """ pass def rollback_to_snap(self, name): """Rollback the image to the named snapshot. A noop on backends that don't support snapshots. :param name: name of the snapshot """ pass class Raw(Image): def __init__(self, instance=None, disk_name=None, path=None): self.disk_name = disk_name super(Raw, self).__init__("file", "raw", is_block_dev=False) self.path = (path or os.path.join(libvirt_utils.get_instance_path(instance), disk_name)) self.preallocate = ( strutils.to_slug(CONF.preallocate_images) == 'space') if self.preallocate: self.driver_io = "native" self.disk_info_path = os.path.join(os.path.dirname(self.path), 'disk.info') self.correct_format() def _get_driver_format(self): try: data = images.qemu_img_info(self.path) return data.file_format except exception.InvalidDiskInfo as e: LOG.info(_LI('Failed to get image info from path %(path)s; ' 'error: %(error)s'), {'path': self.path, 'error': e}) return 'raw' def _supports_encryption(self): # NOTE(dgenin): Kernel, ramdisk and disk.config are fetched using # the Raw backend regardless of which backend is configured for # ephemeral storage. Encryption for the Raw backend is not yet # implemented so this loophole is necessary to allow other # backends already supporting encryption to function. This can # be removed once encryption for Raw is implemented. if self.disk_name not in ['kernel', 'ramdisk', 'disk.config']: return False else: return True def correct_format(self): if os.path.exists(self.path): self.driver_format = self.resolve_driver_format() def create_image(self, prepare_template, base, size, *args, **kwargs): filename = self._get_lock_name(base) @utils.synchronized(filename, external=True, lock_path=self.lock_path) def copy_raw_image(base, target, size): libvirt_utils.copy_image(base, target) if size: # class Raw is misnamed, format may not be 'raw' in all cases image = imgmodel.LocalFileImage(target, self.driver_format) disk.extend(image, size) generating = 'image_id' not in kwargs if generating: if not self.check_image_exists(): # Generating image in place prepare_template(target=self.path, *args, **kwargs) else: if not os.path.exists(base): prepare_template(target=base, max_size=size, *args, **kwargs) # NOTE(mikal): Update the mtime of the base file so the image # cache manager knows it is in use. 
libvirt_utils.update_mtime(base) self.verify_base_size(base, size) if not os.path.exists(self.path): with fileutils.remove_path_on_error(self.path): copy_raw_image(base, self.path, size) self.correct_format() def resize_image(self, size): image = imgmodel.LocalFileImage(self.path, self.driver_format) disk.extend(image, size) def snapshot_extract(self, target, out_format): images.convert_image(self.path, target, self.driver_format, out_format) @staticmethod def is_file_in_instance_path(): return True def get_model(self, connection): return imgmodel.LocalFileImage(self.path, imgmodel.FORMAT_RAW) class Qcow2(Image): def __init__(self, instance=None, disk_name=None, path=None): super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False) self.path = (path or os.path.join(libvirt_utils.get_instance_path(instance), disk_name)) self.preallocate = ( strutils.to_slug(CONF.preallocate_images) == 'space') if self.preallocate: self.driver_io = "native" self.disk_info_path = os.path.join(os.path.dirname(self.path), 'disk.info') self.resolve_driver_format() def create_image(self, prepare_template, base, size, *args, **kwargs): filename = self._get_lock_name(base) @utils.synchronized(filename, external=True, lock_path=self.lock_path) def copy_qcow2_image(base, target, size): # TODO(pbrady): Consider copying the cow image here # with preallocation=metadata set for performance reasons. # This would be keyed on a 'preallocate_images' setting. libvirt_utils.create_cow_image(base, target) if size: image = imgmodel.LocalFileImage(target, imgmodel.FORMAT_QCOW2) disk.extend(image, size) # Download the unmodified base image unless we already have a copy. if not os.path.exists(base): prepare_template(target=base, max_size=size, *args, **kwargs) # NOTE(ankit): Update the mtime of the base file so the image # cache manager knows it is in use. libvirt_utils.update_mtime(base) self.verify_base_size(base, size) legacy_backing_size = None legacy_base = base # Determine whether an existing qcow2 disk uses a legacy backing by # actually looking at the image itself and parsing the output of the # backing file it expects to be using. if os.path.exists(self.path): backing_path = libvirt_utils.get_disk_backing_file(self.path) if backing_path is not None: backing_file = os.path.basename(backing_path) backing_parts = backing_file.rpartition('_') if backing_file != backing_parts[-1] and \ backing_parts[-1].isdigit(): legacy_backing_size = int(backing_parts[-1]) legacy_base += '_%d' % legacy_backing_size legacy_backing_size *= units.Gi # Create the legacy backing file if necessary. 
if legacy_backing_size: if not os.path.exists(legacy_base): with fileutils.remove_path_on_error(legacy_base): libvirt_utils.copy_image(base, legacy_base) image = imgmodel.LocalFileImage(legacy_base, imgmodel.FORMAT_QCOW2) disk.extend(image, legacy_backing_size) if not os.path.exists(self.path): with fileutils.remove_path_on_error(self.path): copy_qcow2_image(base, self.path, size) def resize_image(self, size): image = imgmodel.LocalFileImage(self.path, imgmodel.FORMAT_QCOW2) disk.extend(image, size) def snapshot_extract(self, target, out_format): libvirt_utils.extract_snapshot(self.path, 'qcow2', target, out_format) @staticmethod def is_file_in_instance_path(): return True def get_model(self, connection): return imgmodel.LocalFileImage(self.path, imgmodel.FORMAT_QCOW2) class Lvm(Image): @staticmethod def escape(filename): return filename.replace('_', '__') def __init__(self, instance=None, disk_name=None, path=None): super(Lvm, self).__init__("block", "raw", is_block_dev=True) self.ephemeral_key_uuid = instance.get('ephemeral_key_uuid') if self.ephemeral_key_uuid is not None: self.key_manager = keymgr.API() else: self.key_manager = None if path: self.path = path if self.ephemeral_key_uuid is None: info = lvm.volume_info(path) self.vg = info['VG'] self.lv = info['LV'] else: self.vg = CONF.libvirt.images_volume_group else: if not CONF.libvirt.images_volume_group: raise RuntimeError(_('You should specify' ' images_volume_group' ' flag to use LVM images.')) self.vg = CONF.libvirt.images_volume_group self.lv = '%s_%s' % (instance.uuid, self.escape(disk_name)) if self.ephemeral_key_uuid is None: self.path = os.path.join('/dev', self.vg, self.lv) else: self.lv_path = os.path.join('/dev', self.vg, self.lv) self.path = '/dev/mapper/' + dmcrypt.volume_name(self.lv) # TODO(pbrady): possibly deprecate libvirt.sparse_logical_volumes # for the more general preallocate_images self.sparse = CONF.libvirt.sparse_logical_volumes self.preallocate = not self.sparse if not self.sparse: self.driver_io = "native" def _supports_encryption(self): return True def _can_fallocate(self): return False def create_image(self, prepare_template, base, size, *args, **kwargs): def encrypt_lvm_image(): dmcrypt.create_volume(self.path.rpartition('/')[2], self.lv_path, CONF.ephemeral_storage_encryption.cipher, CONF.ephemeral_storage_encryption.key_size, key) filename = self._get_lock_name(base) @utils.synchronized(filename, external=True, lock_path=self.lock_path) def create_lvm_image(base, size): base_size = disk.get_disk_size(base) self.verify_base_size(base, size, base_size=base_size) resize = size > base_size size = size if resize else base_size lvm.create_volume(self.vg, self.lv, size, sparse=self.sparse) if self.ephemeral_key_uuid is not None: encrypt_lvm_image() # NOTE: by calling convert_image_unsafe here we're # telling qemu-img convert to do format detection on the input, # because we don't know what the format is. For example, # we might have downloaded a qcow2 image, or created an # ephemeral filesystem locally, we just don't know here. Having # audited this, all current sources have been sanity checked, # either because they're locally generated, or because they have # come from images.fetch_to_raw. However, this is major code smell. 
            images.convert_image_unsafe(base, self.path, self.driver_format,
                                        run_as_root=True)
            if resize:
                disk.resize2fs(self.path, run_as_root=True)

        generated = 'ephemeral_size' in kwargs
        if self.ephemeral_key_uuid is not None:
            if 'context' in kwargs:
                try:
                    # NOTE(dgenin): Key manager corresponding to the
                    # specific backend catches and reraises an
                    # exception if key retrieval fails.
                    key = self.key_manager.get_key(kwargs['context'],
                        self.ephemeral_key_uuid).get_encoded()
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE("Failed to retrieve ephemeral encryption"
                                      " key"))
            else:
                raise exception.NovaException(
                    _("Instance disk to be encrypted but no context provided"))
        # Generate images with specified size right on volume<|fim▁hole|>
""" info = vconfig.LibvirtConfigGuestDisk() hosts, ports = self.driver.get_mon_addrs() info.source_device = device_type info.driver_format = 'raw' info.driver_cache = cache_mode info.driver_discard = self.discard_mode info.target_bus = disk_bus info.target_dev = disk_dev info.source_type = 'network' info.source_protocol = 'rbd' info.source_name = '%s/%s' % (self.pool, self.rbd_name) info.source_hosts = hosts info.source_ports = ports auth_enabled = (CONF.libvirt.rbd_user is not None) if CONF.libvirt.rbd_secret_uuid: info.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid auth_enabled = True # Force authentication locally if CONF.libvirt.rbd_user: info.auth_username = CONF.libvirt.rbd_user if auth_enabled: info.auth_secret_type = 'ceph' info.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid self.disk_qos(info, extra_specs) return info def _can_fallocate(self): return False def check_image_exists(self): return self.driver.exists(self.rbd_name) def get_disk_size(self, name): """Returns the size of the virtual disk in bytes. The name argument is ignored since this backend already knows its name, and callers may pass a non-existent local file path. """ return self.driver.size(self.rbd_name) def create_image(self, prepare_template, base, size, *args, **kwargs): if not self.check_image_exists(): prepare_template(target=base, max_size=size, *args, **kwargs) # prepare_template() may have cloned the image into a new rbd # image already instead of downloading it locally if not self.check_image_exists(): self.driver.import_image(base, self.rbd_name) self.verify_base_size(base, size) if size and size > self.get_disk_size(self.rbd_name): self.driver.resize(self.rbd_name, size) def resize_image(self, size): self.driver.resize(self.rbd_name, size) def snapshot_extract(self, target, out_format): images.convert_image(self.path, target, 'raw', out_format) @staticmethod def is_shared_block_storage(): return True def clone(self, context, image_id_or_uri): image_meta = IMAGE_API.get(context, image_id_or_uri, include_locations=True) locations = image_meta['locations'] LOG.debug('Image locations are: %(locs)s' % {'locs': locations}) if image_meta.get('disk_format') not in ['raw', 'iso']: reason = _('Image is not raw format') raise exception.ImageUnacceptable(image_id=image_id_or_uri, reason=reason) for location in locations: if self.driver.is_cloneable(location, image_meta): return self.driver.clone(location, self.rbd_name) reason = _('No image locations are accessible') raise exception.ImageUnacceptable(image_id=image_id_or_uri, reason=reason) def get_model(self, connection): secret = None if CONF.libvirt.rbd_secret_uuid: secretobj = connection.secretLookupByUUIDString( CONF.libvirt.rbd_secret_uuid) secret = base64.b64encode(secretobj.value()) hosts, ports = self.driver.get_mon_addrs() servers = [str(':'.join(k)) for k in zip(hosts, ports)] return imgmodel.RBDImage(self.rbd_name, self.pool, self.rbd_user, secret, servers) def import_file(self, instance, local_file, remote_name): name = '%s_%s' % (instance.uuid, remote_name) if self.check_image_exists(): self.driver.remove_image(name) self.driver.import_image(local_file, name) def create_snap(self, name): return self.driver.create_snap(self.rbd_name, name) def remove_snap(self, name, ignore_errors=False): return self.driver.remove_snap(self.rbd_name, name, ignore_errors) def rollback_to_snap(self, name): return self.driver.rollback_to_snap(self.rbd_name, name) def _get_parent_pool(self, context, base_image_id, fsid): parent_pool = None try: # The easy way -- the image 
is an RBD clone, so use the parent # images' storage pool parent_pool, _im, _snap = self.driver.parent_info(self.rbd_name) except exception.ImageUnacceptable: # The hard way -- the image is itself a parent, so ask Glance # where it came from LOG.debug('No parent info for %s; asking the Image API where its ' 'store is', base_image_id) try: image_meta = IMAGE_API.get(context, base_image_id, include_locations=True) except Exception as e: LOG.debug('Unable to get image %(image_id)s; error: %(error)s', {'image_id': base_image_id, 'error': e}) image_meta = {} # Find the first location that is in the same RBD cluster for location in image_meta.get('locations', []): try: parent_fsid, parent_pool, _im, _snap = \ self.driver.parse_url(location['url']) if parent_fsid == fsid: break else: parent_pool = None except exception.ImageUnacceptable: continue if not parent_pool: raise exception.ImageUnacceptable( _('Cannot determine the parent storage pool for %s; ' 'cannot determine where to store images') % base_image_id) return parent_pool def direct_snapshot(self, context, snapshot_name, image_format, image_id, base_image_id): """Creates an RBD snapshot directly. """ fsid = self.driver.get_fsid() # NOTE(nic): Nova has zero comprehension of how Glance's image store # is configured, but we can infer what storage pool Glance is using # by looking at the parent image. If using authx, write access should # be enabled on that pool for the Nova user parent_pool = self._get_parent_pool(context, base_image_id, fsid) # Snapshot the disk and clone it into Glance's storage pool. librbd # requires that snapshots be set to "protected" in order to clone them self.driver.create_snap(self.rbd_name, snapshot_name, protect=True) location = {'url': 'rbd://%(fsid)s/%(pool)s/%(image)s/%(snap)s' % dict(fsid=fsid, pool=self.pool, image=self.rbd_name, snap=snapshot_name)} try: self.driver.clone(location, image_id, dest_pool=parent_pool) # Flatten the image, which detaches it from the source snapshot self.driver.flatten(image_id, pool=parent_pool) finally: # all done with the source snapshot, clean it up self.cleanup_direct_snapshot(location) # Glance makes a protected snapshot called 'snap' on uploaded # images and hands it out, so we'll do that too. The name of # the snapshot doesn't really matter, this just uses what the # glance-store rbd backend sets (which is not configurable). self.driver.create_snap(image_id, 'snap', pool=parent_pool, protect=True) return ('rbd://%(fsid)s/%(pool)s/%(image)s/snap' % dict(fsid=fsid, pool=parent_pool, image=image_id)) def cleanup_direct_snapshot(self, location, also_destroy_volume=False, ignore_errors=False): """Unprotects and destroys the name snapshot. With also_destroy_volume=True, it will also cleanup/destroy the parent volume. This is useful for cleaning up when the target volume fails to snapshot properly. 
""" if location: _fsid, _pool, _im, _snap = self.driver.parse_url(location['url']) self.driver.remove_snap(_im, _snap, pool=_pool, force=True, ignore_errors=ignore_errors) if also_destroy_volume: self.driver.destroy_volume(_im, pool=_pool) class Ploop(Image): def __init__(self, instance=None, disk_name=None, path=None): super(Ploop, self).__init__("file", "ploop", is_block_dev=False) self.path = (path or os.path.join(libvirt_utils.get_instance_path(instance), disk_name)) self.resolve_driver_format() def create_image(self, prepare_template, base, size, *args, **kwargs): filename = os.path.split(base)[-1] @utils.synchronized(filename, external=True, lock_path=self.lock_path) def create_ploop_image(base, target, size): image_path = os.path.join(target, "root.hds") libvirt_utils.copy_image(base, image_path) utils.execute('ploop', 'restore-descriptor', '-f', self.pcs_format, target, image_path) if size: dd_path = os.path.join(self.path, "DiskDescriptor.xml") utils.execute('ploop', 'grow', '-s', '%dK' % (size >> 10), dd_path, run_as_root=True) if not os.path.exists(self.path): if CONF.force_raw_images: self.pcs_format = "raw" else: image_meta = IMAGE_API.get(kwargs["context"], kwargs["image_id"]) format = image_meta.get("disk_format") if format == "ploop": self.pcs_format = "expanded" elif format == "raw": self.pcs_format = "raw" else: reason = _("PCS doesn't support images in %s format." " You should either set force_raw_images=True" " in config or upload an image in ploop" " or raw format.") % format raise exception.ImageUnacceptable( image_id=kwargs["image_id"], reason=reason) if not os.path.exists(base): prepare_template(target=base, max_size=size, *args, **kwargs) self.verify_base_size(base, size) if os.path.exists(self.path): return fileutils.ensure_tree(self.path) remove_func = functools.partial(fileutils.delete_if_exists, remove=shutil.rmtree) with fileutils.remove_path_on_error(self.path, remove=remove_func): create_ploop_image(base, self.path, size) def resize_image(self, size): dd_path = os.path.join(self.path, "DiskDescriptor.xml") utils.execute('ploop', 'grow', '-s', '%dK' % (size >> 10), dd_path, run_as_root=True) def snapshot_extract(self, target, out_format): img_path = os.path.join(self.path, "root.hds") libvirt_utils.extract_snapshot(img_path, 'parallels', target, out_format) class Backend(object): def __init__(self, use_cow): self.BACKEND = { 'raw': Raw, 'qcow2': Qcow2, 'lvm': Lvm, 'rbd': Rbd, 'ploop': Ploop, 'default': Qcow2 if use_cow else Raw } def backend(self, image_type=None): if not image_type: image_type = CONF.libvirt.images_type image = self.BACKEND.get(image_type) if not image: raise RuntimeError(_('Unknown image_type=%s') % image_type) return image def image(self, instance, disk_name, image_type=None): """Constructs image for selected backend :instance: Instance name. :name: Image name. :image_type: Image type. Optional, is CONF.libvirt.images_type by default. """ backend = self.backend(image_type) return backend(instance=instance, disk_name=disk_name) def snapshot(self, instance, disk_path, image_type=None): """Returns snapshot for given image :path: path to image :image_type: type of image """ backend = self.backend(image_type) return backend(instance=instance, path=disk_path)<|fim▁end|>
if generated and size: lvm.create_volume(self.vg, self.lv,
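# ----------------------------------------------------------------------
# Hedged sketch (not part of the module above) of the legacy backing-file
# naming that Qcow2.create_image() detects: older deployments resized a
# cached base image in place and suffixed the copy with its size in GB,
# e.g. "<base>_10". The path below is hypothetical, and a literal stands
# in for oslo's units.Gi so the snippet runs on its own.
import os

backing_file = os.path.basename('/var/lib/nova/_base/0123abcd_10')
backing_parts = backing_file.rpartition('_')
if backing_file != backing_parts[-1] and backing_parts[-1].isdigit():
    legacy_backing_size = int(backing_parts[-1])            # 10 (GB)
    legacy_base = '0123abcd' + '_%d' % legacy_backing_size
    legacy_backing_size *= 1024 ** 3                        # units.Gi
# ----------------------------------------------------------------------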
<|file_name|>CustomTypeFaceSpan.java<|end_file_name|><|fim▁begin|>package fyp.hkust.facet.util; /** * Created by ClementNg on 2/4/2017. */ import android.graphics.Paint; import android.graphics.Typeface;<|fim▁hole|> private final Typeface newType; public CustomTypeFaceSpan(String family, Typeface type) { super(family); newType = type; } @Override public void updateDrawState(TextPaint ds) { applyCustomTypeFace(ds, newType); } @Override public void updateMeasureState(TextPaint paint) { applyCustomTypeFace(paint, newType); } private static void applyCustomTypeFace(Paint paint, Typeface tf) { int oldStyle; Typeface old = paint.getTypeface(); if (old == null) { oldStyle = 0; } else { oldStyle = old.getStyle(); } int fake = oldStyle & ~tf.getStyle(); if ((fake & Typeface.BOLD) != 0) { paint.setFakeBoldText(true); } if ((fake & Typeface.ITALIC) != 0) { paint.setTextSkewX(-0.25f); } paint.setTypeface(tf); } }<|fim▁end|>
import android.text.TextPaint; import android.text.style.TypefaceSpan; public class CustomTypeFaceSpan extends TypefaceSpan {
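// ----------------------------------------------------------------------
// Hedged usage sketch (not part of the class above). The span calls are
// standard Android text APIs; the font asset path and view name are
// illustrative only:
//
//   Typeface tf = Typeface.createFromAsset(context.getAssets(), "fonts/Custom.ttf");
//   SpannableString title = new SpannableString("Facet");
//   title.setSpan(new CustomTypeFaceSpan("", tf), 0, title.length(),
//                 Spanned.SPAN_EXCLUSIVE_EXCLUSIVE);
//   toolbar.setTitle(title);
// ----------------------------------------------------------------------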
<|file_name|>drag_drop_tracker_unittest.cc<|end_file_name|><|fim▁begin|>// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ash/drag_drop/drag_drop_tracker.h" #include "ash/shell.h" #include "ash/shell_window_ids.h" #include "ash/test/ash_test_base.h" #include "base/memory/scoped_ptr.h" #include "ui/aura/root_window.h" #include "ui/aura/test/test_windows.h" #include "ui/aura/window.h" namespace ash { namespace test { class DragDropTrackerTest : public test::AshTestBase { public: virtual void SetUp() OVERRIDE { AshTestBase::SetUp(); UpdateDisplay("200x200,200x200"); } aura::Window* CreateTestWindow(const gfx::Rect& bounds) { static int window_id = 0; return CreateTestWindowInShellWithDelegate( aura::test::TestWindowDelegate::CreateSelfDestroyingDelegate(), window_id++, bounds); } static aura::Window* GetTarget(const gfx::Point& location) { scoped_ptr<internal::DragDropTracker> tracker( new internal::DragDropTracker(Shell::GetPrimaryRootWindow(), NULL)); ui::MouseEvent e(ui::ET_MOUSE_DRAGGED, location, location, ui::EF_NONE); aura::Window* target = tracker->GetTarget(e); return target; } static ui::LocatedEvent* ConvertEvent(aura::Window* target, const ui::MouseEvent& event) { scoped_ptr<internal::DragDropTracker> tracker( new internal::DragDropTracker(Shell::GetPrimaryRootWindow(), NULL)); ui::LocatedEvent* converted = tracker->ConvertEvent(target, event); return converted; } }; // TODO(mazda): Remove this once ash/wm/coordinate_conversion.h supports // non-X11 platforms. #if defined(USE_X11) #define MAYBE_GetTarget GetTarget #else #define MAYBE_GetTarget DISABLED_GetTarget #endif TEST_F(DragDropTrackerTest, MAYBE_GetTarget) { Shell::RootWindowList root_windows = Shell::GetAllRootWindows(); EXPECT_EQ(2U, root_windows.size()); scoped_ptr<aura::Window> window0( CreateTestWindow(gfx::Rect(0, 0, 100, 100)));<|fim▁hole|> window0->Show(); scoped_ptr<aura::Window> window1( CreateTestWindow(gfx::Rect(300, 100, 100, 100))); window1->Show(); EXPECT_EQ(root_windows[0], window0->GetRootWindow()); EXPECT_EQ(root_windows[1], window1->GetRootWindow()); EXPECT_EQ("0,0 100x100", window0->GetBoundsInScreen().ToString()); EXPECT_EQ("300,100 100x100", window1->GetBoundsInScreen().ToString()); // Make RootWindow0 active so that capture window is parented to it. Shell::GetInstance()->set_active_root_window(root_windows[0]); // Start tracking from the RootWindow1 and check the point on RootWindow0 that // |window0| covers. EXPECT_EQ(window0.get(), GetTarget(gfx::Point(50, 50))); // Start tracking from the RootWindow0 and check the point on RootWindow0 that // neither |window0| nor |window1| covers. EXPECT_NE(window0.get(), GetTarget(gfx::Point(150, 150))); EXPECT_NE(window1.get(), GetTarget(gfx::Point(150, 150))); // Start tracking from the RootWindow0 and check the point on RootWindow1 that // |window1| covers. EXPECT_EQ(window1.get(), GetTarget(gfx::Point(350, 150))); // Start tracking from the RootWindow0 and check the point on RootWindow1 that // neither |window0| nor |window1| covers. EXPECT_NE(window0.get(), GetTarget(gfx::Point(50, 250))); EXPECT_NE(window1.get(), GetTarget(gfx::Point(50, 250))); // Make RootWindow1 active so that capture window is parented to it. Shell::GetInstance()->set_active_root_window(root_windows[1]); // Start tracking from the RootWindow1 and check the point on RootWindow0 that // |window0| covers. 
  EXPECT_EQ(window0.get(), GetTarget(gfx::Point(-150, 50)));

  // Start tracking from the RootWindow1 and check the point on RootWindow0 that
  // neither |window0| nor |window1| covers.
  EXPECT_NE(window0.get(), GetTarget(gfx::Point(150, -50)));
  EXPECT_NE(window1.get(), GetTarget(gfx::Point(150, -50)));

  // Start tracking from the RootWindow1 and check the point on RootWindow1 that
  // |window1| covers.
  EXPECT_EQ(window1.get(), GetTarget(gfx::Point(150, 150)));

  // Start tracking from the RootWindow1 and check the point on RootWindow1 that
  // neither |window0| nor |window1| covers.
  EXPECT_NE(window0.get(), GetTarget(gfx::Point(50, 50)));
  EXPECT_NE(window1.get(), GetTarget(gfx::Point(50, 50)));
}

// TODO(mazda): Remove this once ash/wm/coordinate_conversion.h supports
// non-X11 platforms.
#if defined(USE_X11)
#define MAYBE_ConvertEvent ConvertEvent
#else
#define MAYBE_ConvertEvent DISABLED_ConvertEvent
#endif
TEST_F(DragDropTrackerTest, MAYBE_ConvertEvent) {
  Shell::RootWindowList root_windows = Shell::GetAllRootWindows();
  EXPECT_EQ(2U, root_windows.size());

  scoped_ptr<aura::Window> window0(
      CreateTestWindow(gfx::Rect(0, 0, 100, 100)));
  window0->Show();

  scoped_ptr<aura::Window> window1(
      CreateTestWindow(gfx::Rect(300, 100, 100, 100)));
  window1->Show();

  // Make RootWindow0 active so that capture window is parented to it.
  Shell::GetInstance()->set_active_root_window(root_windows[0]);

  // Start tracking from the RootWindow0 and convert the mouse event into
  // |window0|'s coordinates.
  ui::MouseEvent original00(ui::ET_MOUSE_DRAGGED,
                            gfx::Point(50, 50),
                            gfx::Point(50, 50),
                            ui::EF_NONE);
  scoped_ptr<ui::LocatedEvent> converted00(ConvertEvent(window0.get(),
                                                        original00));
  EXPECT_EQ(original00.type(), converted00->type());
  EXPECT_EQ("50,50", converted00->location().ToString());
  EXPECT_EQ("50,50", converted00->root_location().ToString());
  EXPECT_EQ(original00.flags(), converted00->flags());

  // Start tracking from the RootWindow0 and convert the mouse event into
  // |window1|'s coordinates.
  ui::MouseEvent original01(ui::ET_MOUSE_DRAGGED,
                            gfx::Point(350, 150),
                            gfx::Point(350, 150),
                            ui::EF_NONE);
  scoped_ptr<ui::LocatedEvent> converted01(ConvertEvent(window1.get(),
                                                        original01));
  EXPECT_EQ(original01.type(), converted01->type());
  EXPECT_EQ("50,50", converted01->location().ToString());
  EXPECT_EQ("150,150", converted01->root_location().ToString());
  EXPECT_EQ(original01.flags(), converted01->flags());

  // Make RootWindow1 active so that capture window is parented to it.
  Shell::GetInstance()->set_active_root_window(root_windows[1]);

  // Start tracking from the RootWindow1 and convert the mouse event into
  // |window0|'s coordinates.
  ui::MouseEvent original10(ui::ET_MOUSE_DRAGGED,
                            gfx::Point(-150, 50),
                            gfx::Point(-150, 50),
                            ui::EF_NONE);
  scoped_ptr<ui::LocatedEvent> converted10(ConvertEvent(window0.get(),
                                                        original10));
  EXPECT_EQ(original10.type(), converted10->type());
  EXPECT_EQ("50,50", converted10->location().ToString());
  EXPECT_EQ("50,50", converted10->root_location().ToString());
  EXPECT_EQ(original10.flags(), converted10->flags());

  // Start tracking from the RootWindow1 and convert the mouse event into
  // |window1|'s coordinates.
  ui::MouseEvent original11(ui::ET_MOUSE_DRAGGED,
                            gfx::Point(150, 150),
                            gfx::Point(150, 150),
                            ui::EF_NONE);
  scoped_ptr<ui::LocatedEvent> converted11(ConvertEvent(window1.get(),
                                                        original11));
  EXPECT_EQ(original11.type(), converted11->type());
  EXPECT_EQ("50,50", converted11->location().ToString());
  EXPECT_EQ("150,150", converted11->root_location().ToString());
  EXPECT_EQ(original11.flags(), converted11->flags());
}

}  // namespace test
}  // namespace ash<|fim▁end|>
<|file_name|>0013_auto_20160223_1757.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('main', '0012_auto_20160204_1503'),<|fim▁hole|> ] operations = [ migrations.AlterModelOptions( name='eventassignment', options={'permissions': (('can_be_assigned', 'Can be assigned to events'),)}, ), migrations.AlterField( model_name='suggestedevent', name='status', field=models.CharField(default=b'created', max_length=40, choices=[(b'created', b'Created'), (b'submitted', b'Submitted'), (b'resubmitted', b'Resubmitted'), (b'rejected', b'Bounced back'), (b'retracted', b'Retracted'), (b'accepted', b'Accepted'), (b'removed', b'Removed')]), preserve_default=True, ), migrations.AlterField( model_name='template', name='content', field=models.TextField(help_text=b"The HTML framework for this template. Use <code>{{ any_variable_name }}</code> for per-event tags. Other Jinja2 constructs are available, along with the related <code>request</code>, <code>datetime</code>, <code>event</code> objects, and the <code>md5</code> function. You can also reference <code>autoplay</code> and it's always safe. Additionally we have <code>vidly_tokenize(tag, seconds)</code>, <code>edgecast_tokenize([seconds], **kwargs)</code> and <code>akamai_tokenize([seconds], **kwargs)</code><br> Warning! Changes affect all events associated with this template."), preserve_default=True, ), ]<|fim▁end|>
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ class config(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/l2vpn-evpn/prefix-limit/config. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Configuration parameters relating to the prefix limit for the AFI-SAFI """ __slots__ = ( "_path_helper", "_extmethods", "__max_prefixes", "__prevent_teardown", "__shutdown_threshold_pct", "__restart_timer", ) _yang_name = "config" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__max_prefixes = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint32", is_config=True, ) self.__prevent_teardown = YANGDynClass( base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=True, ) self.__shutdown_threshold_pct = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["0..100"]}, ), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="oc-types:percentage", is_config=True, ) self.__restart_timer = YANGDynClass( base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="decimal64", is_config=True, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = 
getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "bgp", "peer-groups", "peer-group", "afi-safis", "afi-safi", "l2vpn-evpn", "prefix-limit", "config", ] def _get_max_prefixes(self): """ Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/max_prefixes (uint32) YANG Description: Maximum number of prefixes that will be accepted from the neighbour """ return self.__max_prefixes def _set_max_prefixes(self, v, load=False): """ Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/max_prefixes (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_max_prefixes is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_max_prefixes() directly. YANG Description: Maximum number of prefixes that will be accepted from the neighbour """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint32", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """max_prefixes must be of a type compatible with uint32""", "defined-type": "uint32", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""", } ) self.__max_prefixes = t if hasattr(self, "_set"): self._set() def _unset_max_prefixes(self): self.__max_prefixes = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint32", is_config=True, ) def _get_prevent_teardown(self): """ Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/prevent_teardown (boolean) YANG Description: Do not tear down the BGP session when the maximum prefix limit is exceeded, but rather only log a warning. The default of this leaf is false, such that when it is not specified, the session is torn down. 
""" return self.__prevent_teardown def _set_prevent_teardown(self, v, load=False): """ Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/prevent_teardown (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_prevent_teardown is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_prevent_teardown() directly. YANG Description: Do not tear down the BGP session when the maximum prefix limit is exceeded, but rather only log a warning. The default of this leaf is false, such that when it is not specified, the session is torn down. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """prevent_teardown must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""", } ) self.__prevent_teardown = t if hasattr(self, "_set"): self._set() def _unset_prevent_teardown(self): self.__prevent_teardown = YANGDynClass( base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=True, ) def _get_shutdown_threshold_pct(self): """ Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage) YANG Description: Threshold on number of prefixes that can be received from a neighbour before generation of warning messages or log entries. Expressed as a percentage of max-prefixes """ return self.__shutdown_threshold_pct def _set_shutdown_threshold_pct(self, v, load=False): """ Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage) If this variable is read-only (config: false) in the source YANG file, then _set_shutdown_threshold_pct is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_shutdown_threshold_pct() directly. YANG Description: Threshold on number of prefixes that can be received from a neighbour before generation of warning messages or log entries. 
Expressed as a percentage of max-prefixes """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8, ), restriction_dict={"range": ["0..100"]}, ), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="oc-types:percentage", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""", "defined-type": "oc-types:percentage", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=True)""", } ) self.__shutdown_threshold_pct = t if hasattr(self, "_set"): self._set() def _unset_shutdown_threshold_pct(self): self.__shutdown_threshold_pct = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["0..100"]}, ), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="oc-types:percentage", is_config=True, ) def _get_restart_timer(self): """ Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/restart_timer (decimal64) YANG Description: Time interval in seconds after which the BGP session is re-established after being torn down due to exceeding the max-prefix limit. """ return self.__restart_timer def _set_restart_timer(self, v, load=False): """ Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/restart_timer (decimal64) If this variable is read-only (config: false) in the source YANG file, then _set_restart_timer is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_restart_timer() directly. YANG Description: Time interval in seconds after which the BGP session is re-established after being torn down due to exceeding the max-prefix limit. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="decimal64", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """restart_timer must be of a type compatible with decimal64""", "defined-type": "decimal64", "generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=True)""", } ) self.__restart_timer = t if hasattr(self, "_set"): self._set() def _unset_restart_timer(self): self.__restart_timer = YANGDynClass( base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="decimal64", is_config=True, ) max_prefixes = __builtin__.property(_get_max_prefixes, _set_max_prefixes) prevent_teardown = __builtin__.property( _get_prevent_teardown, _set_prevent_teardown ) shutdown_threshold_pct = __builtin__.property( _get_shutdown_threshold_pct, _set_shutdown_threshold_pct ) restart_timer = __builtin__.property(_get_restart_timer, _set_restart_timer) _pyangbind_elements = OrderedDict( [ ("max_prefixes", max_prefixes), ("prevent_teardown", prevent_teardown), ("shutdown_threshold_pct", shutdown_threshold_pct), ("restart_timer", restart_timer), ] ) class config(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/l2vpn-evpn/prefix-limit/config. Each member element of the container is represented as a class variable - with a specific YANG type. 
YANG Description: Configuration parameters relating to the prefix limit for the AFI-SAFI """ __slots__ = ( "_path_helper", "_extmethods", "__max_prefixes", "__prevent_teardown", "__shutdown_threshold_pct", "__restart_timer", ) _yang_name = "config" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__max_prefixes = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint32", is_config=True, ) self.__prevent_teardown = YANGDynClass( base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=True, ) self.__shutdown_threshold_pct = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["0..100"]}, ), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="oc-types:percentage", is_config=True, ) self.__restart_timer = YANGDynClass( base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="decimal64", is_config=True, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "bgp", "peer-groups", "peer-group", "afi-safis", "afi-safi", "l2vpn-evpn", "prefix-limit", "config", ] def _get_max_prefixes(self): """ Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/max_prefixes (uint32) YANG Description: Maximum number of prefixes that will be accepted from the neighbour """ return self.__max_prefixes def _set_max_prefixes(self, v, load=False): """ Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/max_prefixes (uint32) If this variable is read-only (config: false) in the source YANG 
file, then _set_max_prefixes is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_max_prefixes() directly. YANG Description: Maximum number of prefixes that will be accepted from the neighbour """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint32", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """max_prefixes must be of a type compatible with uint32""", "defined-type": "uint32", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""", } ) self.__max_prefixes = t if hasattr(self, "_set"): self._set() def _unset_max_prefixes(self): self.__max_prefixes = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint32", is_config=True, ) def _get_prevent_teardown(self): """ Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/prevent_teardown (boolean) YANG Description: Do not tear down the BGP session when the maximum prefix limit is exceeded, but rather only log a warning. The default of this leaf is false, such that when it is not specified, the session is torn down. """ return self.__prevent_teardown def _set_prevent_teardown(self, v, load=False): """ Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/prevent_teardown (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_prevent_teardown is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_prevent_teardown() directly. YANG Description: Do not tear down the BGP session when the maximum prefix limit is exceeded, but rather only log a warning. The default of this leaf is false, such that when it is not specified, the session is torn down. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """prevent_teardown must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""", } ) self.__prevent_teardown = t if hasattr(self, "_set"): self._set() def _unset_prevent_teardown(self): self.__prevent_teardown = YANGDynClass( base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=True, ) def _get_shutdown_threshold_pct(self): """ Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage) YANG Description: Threshold on number of prefixes that can be received from a neighbour before generation of warning messages or log entries. Expressed as a percentage of max-prefixes """<|fim▁hole|> Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage) If this variable is read-only (config: false) in the source YANG file, then _set_shutdown_threshold_pct is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_shutdown_threshold_pct() directly. YANG Description: Threshold on number of prefixes that can be received from a neighbour before generation of warning messages or log entries. 
Expressed as a percentage of max-prefixes """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8, ), restriction_dict={"range": ["0..100"]}, ), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="oc-types:percentage", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""", "defined-type": "oc-types:percentage", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=True)""", } ) self.__shutdown_threshold_pct = t if hasattr(self, "_set"): self._set() def _unset_shutdown_threshold_pct(self): self.__shutdown_threshold_pct = YANGDynClass( base=RestrictedClassType( base_type=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8 ), restriction_dict={"range": ["0..100"]}, ), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="oc-types:percentage", is_config=True, ) def _get_restart_timer(self): """ Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/restart_timer (decimal64) YANG Description: Time interval in seconds after which the BGP session is re-established after being torn down due to exceeding the max-prefix limit. """ return self.__restart_timer def _set_restart_timer(self, v, load=False): """ Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/l2vpn_evpn/prefix_limit/config/restart_timer (decimal64) If this variable is read-only (config: false) in the source YANG file, then _set_restart_timer is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_restart_timer() directly. YANG Description: Time interval in seconds after which the BGP session is re-established after being torn down due to exceeding the max-prefix limit. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="decimal64", is_config=True, ) except (TypeError, ValueError): raise ValueError( { "error-string": """restart_timer must be of a type compatible with decimal64""", "defined-type": "decimal64", "generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=True)""", } ) self.__restart_timer = t if hasattr(self, "_set"): self._set() def _unset_restart_timer(self): self.__restart_timer = YANGDynClass( base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="decimal64", is_config=True, ) max_prefixes = __builtin__.property(_get_max_prefixes, _set_max_prefixes) prevent_teardown = __builtin__.property( _get_prevent_teardown, _set_prevent_teardown ) shutdown_threshold_pct = __builtin__.property( _get_shutdown_threshold_pct, _set_shutdown_threshold_pct ) restart_timer = __builtin__.property(_get_restart_timer, _set_restart_timer) _pyangbind_elements = OrderedDict( [ ("max_prefixes", max_prefixes), ("prevent_teardown", prevent_teardown), ("shutdown_threshold_pct", shutdown_threshold_pct), ("restart_timer", restart_timer), ] )<|fim▁end|>
return self.__shutdown_threshold_pct def _set_shutdown_threshold_pct(self, v, load=False): """
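A minimal usage sketch for the generated prefix-limit `config` container above. It assumes pyangbind and its runtime dependencies (e.g. bitarray, six) are installed; all values are illustrative only.

# Usage sketch for the auto-generated `config` class above (assumes
# pyangbind and its dependencies are installed; values are illustrative).
cfg = config()

# The generated properties validate assignments against the YANG restrictions.
cfg.max_prefixes = 1000            # uint32, range 0..4294967295
cfg.shutdown_threshold_pct = 80    # oc-types:percentage, range 0..100
cfg.prevent_teardown = True        # boolean, defaults to false

try:
    cfg.shutdown_threshold_pct = 150   # violates the 0..100 restriction
except ValueError as err:
    # The setters raise ValueError carrying an error-string/defined-type dict.
    print(err)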
<|file_name|>regions-outlives-projection-container-wc.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. <|fim▁hole|>// type of a bound that appears in the where clause on a struct must // outlive the location in which the type appears, even when the // constraint is in a where clause not a bound. Issue #22246. #![allow(dead_code)] /////////////////////////////////////////////////////////////////////////// pub trait TheTrait { type TheAssocType; } pub struct TheType<'b> { m: [fn(&'b()); 0] } impl<'b> TheTrait for TheType<'b> { type TheAssocType = &'b (); } /////////////////////////////////////////////////////////////////////////// pub struct WithAssoc<T> where T : TheTrait { m: [T; 0] } fn with_assoc<'a,'b>() { // For this type to be valid, the rules require that all // associated types of traits that appear in `WithAssoc` must // outlive 'a. In this case, that means TheType<'b>::TheAssocType, // which is &'b (), must outlive 'a. // FIXME (#54943) NLL doesn't enforce WF condition in unreachable code if // `_x` is changed to `_` let _x: &'a WithAssoc<TheType<'b>> = loop { }; //~^ ERROR reference has a longer lifetime } fn main() { }<|fim▁end|>
// Test that we are imposing the requirement that every associated
<|file_name|>use.rs<|end_file_name|><|fim▁begin|>// ignore-fast // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. //<|fim▁hole|>// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-fast #[allow(unused_imports)]; #[no_std]; extern crate std; extern crate zed = "std"; extern crate bar = "std#0.10"; use std::str; use x = zed::str; mod baz { pub use bar::str; pub use x = std::str; } #[start] pub fn start(_: int, _: **u8) -> int { 0 }<|fim▁end|>
<|file_name|>step4.py<|end_file_name|><|fim▁begin|># 14. print_log('\n14. Issuer (Trust Anchor) is creating a Credential Offer for Prover\n') cred_offer_json = await anoncreds.issuer_create_credential_offer(issuer_wallet_handle, cred_def_id) print_log('Credential Offer: ') pprint.pprint(json.loads(cred_offer_json)) # 15. print_log('\n15. Prover creates Credential Request for the given credential offer\n') (cred_req_json, cred_req_metadata_json) = \ await anoncreds.prover_create_credential_req(prover_wallet_handle, prover_did, cred_offer_json, cred_def_json, prover_link_secret_name) print_log('Credential Request: ') pprint.pprint(json.loads(cred_req_json)) # 16. print_log('\n16. Issuer (Trust Anchor) creates Credential for Credential Request\n') cred_values_json = json.dumps({ "sex": {"raw": "male", "encoded": "5944657099558967239210949258394887428692050081607692519917050011144233"}, "name": {"raw": "Alex", "encoded": "1139481716457488690172217916278103335"}, "height": {"raw": "175", "encoded": "175"}, "age": {"raw": "28", "encoded": "28"} }) (cred_json, _, _) = \ await anoncreds.issuer_create_credential(issuer_wallet_handle, cred_offer_json, cred_req_json, cred_values_json, None, None) print_log('Credential: ') pprint.pprint(json.loads(cred_json)) # 17. print_log('\n17. Prover processes and stores received Credential\n') await anoncreds.prover_store_credential(prover_wallet_handle, None, cred_req_metadata_json, cred_json, cred_def_json, None) # 18. print_log('\n18. Closing both wallet_handles and pool\n') await wallet.close_wallet(issuer_wallet_handle) await wallet.close_wallet(prover_wallet_handle)<|fim▁hole|> await pool.close_pool_ledger(pool_handle) # 19. print_log('\n19. Deleting created wallet_handles\n') await wallet.delete_wallet(issuer_wallet_config, issuer_wallet_credentials) await wallet.delete_wallet(prover_wallet_config, prover_wallet_credentials) # 20. print_log('\n20. Deleting pool ledger config\n') await pool.delete_pool_ledger_config(pool_name)<|fim▁end|>
<|file_name|>configure.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # # Copyright 2001 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script that generates the build.ninja for ninja itself. Projects that use ninja themselves should either write a similar script or use a meta-build system that supports Ninja output.""" from __future__ import print_function from optparse import OptionParser import os import pipes import string import subprocess import sys sourcedir = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, os.path.join(sourcedir, 'misc')) import ninja_syntax class Platform(object): """Represents a host/target platform and its specific build attributes.""" def __init__(self, platform): self._platform = platform if self._platform is not None: return self._platform = sys.platform if self._platform.startswith('linux'): self._platform = 'linux' elif self._platform.startswith('freebsd'): self._platform = 'freebsd' elif self._platform.startswith('gnukfreebsd'): self._platform = 'freebsd' elif self._platform.startswith('openbsd'): self._platform = 'openbsd' elif self._platform.startswith('solaris') or self._platform == 'sunos5': self._platform = 'solaris' elif self._platform.startswith('mingw'): self._platform = 'mingw' elif self._platform.startswith('win'): self._platform = 'msvc' elif self._platform.startswith('bitrig'): self._platform = 'bitrig' elif self._platform.startswith('netbsd'): self._platform = 'netbsd' elif self._platform.startswith('aix'): self._platform = 'aix' elif self._platform.startswith('dragonfly'): self._platform = 'dragonfly' @staticmethod def known_platforms(): return ['linux', 'darwin', 'freebsd', 'openbsd', 'solaris', 'sunos5', 'mingw', 'msvc', 'gnukfreebsd', 'bitrig', 'netbsd', 'aix', 'dragonfly'] def platform(self): return self._platform def is_linux(self): return self._platform == 'linux' def is_mingw(self): return self._platform == 'mingw' def is_msvc(self): return self._platform == 'msvc' def msvc_needs_fs(self): popen = subprocess.Popen(['cl', '/nologo', '/?'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = popen.communicate() return b'/FS' in out def is_windows(self): return self.is_mingw() or self.is_msvc() def is_solaris(self): return self._platform == 'solaris' def is_aix(self): return self._platform == 'aix' def uses_usr_local(self): return self._platform in ('freebsd', 'openbsd', 'bitrig', 'dragonfly', 'netbsd') def supports_ppoll(self): return self._platform in ('freebsd', 'linux', 'openbsd', 'bitrig', 'dragonfly') def supports_ninja_browse(self): return (not self.is_windows() and not self.is_solaris() and not self.is_aix()) def can_rebuild_in_place(self): return not (self.is_windows() or self.is_aix()) class Bootstrap: """API shim for ninja_syntax.Writer that instead runs the commands. Used to bootstrap Ninja from scratch. In --bootstrap mode this class is used to execute all the commands to build an executable. 
It also proxies all calls to an underlying ninja_syntax.Writer, to behave like non-bootstrap mode. """ def __init__(self, writer, verbose=False): self.writer = writer self.verbose = verbose # Map of variable name => expanded variable value. self.vars = {} # Map of rule name => dict of rule attributes. self.rules = { 'phony': {} } def comment(self, text): return self.writer.comment(text) def newline(self): return self.writer.newline() def variable(self, key, val): # In bootstrap mode, we have no ninja process to catch /showIncludes # output. self.vars[key] = self._expand(val).replace('/showIncludes', '') return self.writer.variable(key, val) def rule(self, name, **kwargs): self.rules[name] = kwargs return self.writer.rule(name, **kwargs) def build(self, outputs, rule, inputs=None, **kwargs): ruleattr = self.rules[rule] cmd = ruleattr.get('command') if cmd is None: # A phony rule, for example. return<|fim▁hole|> local_vars = { 'in': self._expand_paths(inputs), 'out': self._expand_paths(outputs) } for key, val in kwargs.get('variables', []): local_vars[key] = ' '.join(ninja_syntax.as_list(val)) self._run_command(self._expand(cmd, local_vars)) return self.writer.build(outputs, rule, inputs, **kwargs) def default(self, paths): return self.writer.default(paths) def _expand_paths(self, paths): """Expand $vars in an array of paths, e.g. from a 'build' block.""" paths = ninja_syntax.as_list(paths) return ' '.join(map(self._shell_escape, (map(self._expand, paths)))) def _expand(self, str, local_vars={}): """Expand $vars in a string.""" return ninja_syntax.expand(str, self.vars, local_vars) def _shell_escape(self, path): """Quote paths containing spaces.""" return '"%s"' % path if ' ' in path else path def _run_command(self, cmdline): """Run a subcommand, quietly. Prints the full command on error.""" try: if self.verbose: print(cmdline) subprocess.check_call(cmdline, shell=True) except subprocess.CalledProcessError: print('when running: ', cmdline) raise parser = OptionParser() profilers = ['gmon', 'pprof'] parser.add_option('--bootstrap', action='store_true', help='bootstrap a ninja binary from nothing') parser.add_option('--verbose', action='store_true', help='enable verbose build') parser.add_option('--platform', help='target platform (' + '/'.join(Platform.known_platforms()) + ')', choices=Platform.known_platforms()) parser.add_option('--host', help='host platform (' + '/'.join(Platform.known_platforms()) + ')', choices=Platform.known_platforms()) parser.add_option('--debug', action='store_true', help='enable debugging extras',) parser.add_option('--profile', metavar='TYPE', choices=profilers, help='enable profiling (' + '/'.join(profilers) + ')',) parser.add_option('--with-gtest', metavar='PATH', help='ignored') parser.add_option('--with-python', metavar='EXE', help='use EXE as the Python interpreter', default=os.path.basename(sys.executable)) parser.add_option('--force-pselect', action='store_true', help='ppoll() is used by default where available, ' 'but some platforms may need to use pselect instead',) (options, args) = parser.parse_args() if args: print('ERROR: extra unparsed command-line arguments:', args) sys.exit(1) platform = Platform(options.platform) if options.host: host = Platform(options.host) else: host = platform BUILD_FILENAME = 'build.ninja' ninja_writer = ninja_syntax.Writer(open(BUILD_FILENAME, 'w')) n = ninja_writer if options.bootstrap: # Make the build directory. 
try: os.mkdir('build') except OSError: pass # Wrap ninja_writer with the Bootstrapper, which also executes the # commands. print('bootstrapping ninja...') n = Bootstrap(n, verbose=options.verbose) n.comment('This file is used to build ninja itself.') n.comment('It is generated by ' + os.path.basename(__file__) + '.') n.newline() n.variable('ninja_required_version', '1.3') n.newline() n.comment('The arguments passed to configure.py, for rerunning it.') configure_args = sys.argv[1:] if '--bootstrap' in configure_args: configure_args.remove('--bootstrap') n.variable('configure_args', ' '.join(configure_args)) env_keys = set(['CXX', 'AR', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS']) configure_env = dict((k, os.environ[k]) for k in os.environ if k in env_keys) if configure_env: config_str = ' '.join([k + '=' + pipes.quote(configure_env[k]) for k in configure_env]) n.variable('configure_env', config_str + '$ ') n.newline() CXX = configure_env.get('CXX', 'g++') objext = '.o' if platform.is_msvc(): CXX = 'cl' objext = '.obj' def src(filename): return os.path.join('$root', 'src', filename) def built(filename): return os.path.join('$builddir', filename) def doc(filename): return os.path.join('$root', 'doc', filename) def cc(name, **kwargs): return n.build(built(name + objext), 'cxx', src(name + '.c'), **kwargs) def cxx(name, **kwargs): return n.build(built(name + objext), 'cxx', src(name + '.cc'), **kwargs) def binary(name): if platform.is_windows(): exe = name + '.exe' n.build(name, 'phony', exe) return exe return name root = sourcedir if root == os.getcwd(): # In the common case where we're building directly in the source # tree, simplify all the paths to just be cwd-relative. root = '.' n.variable('root', root) n.variable('builddir', 'build') n.variable('cxx', CXX) if platform.is_msvc(): n.variable('ar', 'link') else: n.variable('ar', configure_env.get('AR', 'ar')) if platform.is_msvc(): cflags = ['/showIncludes', '/nologo', # Don't print startup banner. '/Zi', # Create pdb with debug info. '/W4', # Highest warning level. '/WX', # Warnings as errors. '/wd4530', '/wd4100', '/wd4706', '/wd4244', '/wd4512', '/wd4800', '/wd4702', '/wd4819', # Disable warnings about constant conditional expressions. '/wd4127', # Disable warnings about passing "this" during initialization. '/wd4355', # Disable warnings about ignored typedef in DbgHelp.h '/wd4091', '/GR-', # Disable RTTI. # Disable size_t -> int truncation warning. # We never have strings or arrays larger than 2**31. '/wd4267', '/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS', '/D_HAS_EXCEPTIONS=0', '/DNINJA_PYTHON="%s"' % options.with_python] if platform.msvc_needs_fs(): cflags.append('/FS') ldflags = ['/DEBUG', '/libpath:$builddir'] if not options.debug: cflags += ['/Ox', '/DNDEBUG', '/GL'] ldflags += ['/LTCG', '/OPT:REF', '/OPT:ICF'] else: cflags = ['-g', '-Wall', '-Wextra', '-Wno-deprecated', '-Wno-missing-field-initializers', '-Wno-unused-parameter', '-fno-rtti', '-fno-exceptions', '-fvisibility=hidden', '-pipe', '-DNINJA_PYTHON="%s"' % options.with_python] if options.debug: cflags += ['-D_GLIBCXX_DEBUG', '-D_GLIBCXX_DEBUG_PEDANTIC'] cflags.remove('-fno-rtti') # Needed for above pedanticness. 
else: cflags += ['-O2', '-DNDEBUG'] try: proc = subprocess.Popen( [CXX, '-fdiagnostics-color', '-c', '-x', 'c++', '/dev/null', '-o', '/dev/null'], stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT) if proc.wait() == 0: cflags += ['-fdiagnostics-color'] except: pass if platform.is_mingw(): cflags += ['-D_WIN32_WINNT=0x0501'] ldflags = ['-L$builddir'] if platform.uses_usr_local(): cflags.append('-I/usr/local/include') ldflags.append('-L/usr/local/lib') if platform.is_aix(): # printf formats for int64_t, uint64_t; large file support cflags.append('-D__STDC_FORMAT_MACROS') cflags.append('-D_LARGE_FILES') libs = [] if platform.is_mingw(): cflags.remove('-fvisibility=hidden'); ldflags.append('-static') elif platform.is_solaris(): cflags.remove('-fvisibility=hidden') elif platform.is_aix(): cflags.remove('-fvisibility=hidden') elif platform.is_msvc(): pass else: if options.profile == 'gmon': cflags.append('-pg') ldflags.append('-pg') elif options.profile == 'pprof': cflags.append('-fno-omit-frame-pointer') libs.extend(['-Wl,--no-as-needed', '-lprofiler']) if platform.supports_ppoll() and not options.force_pselect: cflags.append('-DUSE_PPOLL') if platform.supports_ninja_browse(): cflags.append('-DNINJA_HAVE_BROWSE') # Search for generated headers relative to build dir. cflags.append('-I.') def shell_escape(str): """Escape str such that it's interpreted as a single argument by the shell.""" # This isn't complete, but it's just enough to make NINJA_PYTHON work. if platform.is_windows(): return str if '"' in str: return "'%s'" % str.replace("'", "\\'") return str if 'CFLAGS' in configure_env: cflags.append(configure_env['CFLAGS']) ldflags.append(configure_env['CFLAGS']) if 'CXXFLAGS' in configure_env: cflags.append(configure_env['CXXFLAGS']) ldflags.append(configure_env['CXXFLAGS']) n.variable('cflags', ' '.join(shell_escape(flag) for flag in cflags)) if 'LDFLAGS' in configure_env: ldflags.append(configure_env['LDFLAGS']) n.variable('ldflags', ' '.join(shell_escape(flag) for flag in ldflags)) n.newline() if platform.is_msvc(): n.rule('cxx', command='$cxx $cflags -c $in /Fo$out', description='CXX $out', deps='msvc' # /showIncludes is included in $cflags. 
) else: n.rule('cxx', command='$cxx -MMD -MT $out -MF $out.d $cflags -c $in -o $out', depfile='$out.d', deps='gcc', description='CXX $out') n.newline() if host.is_msvc(): n.rule('ar', command='lib /nologo /ltcg /out:$out $in', description='LIB $out') elif host.is_mingw(): n.rule('ar', command='cmd /c $ar cqs $out.tmp $in && move /Y $out.tmp $out', description='AR $out') else: n.rule('ar', command='rm -f $out && $ar crs $out $in', description='AR $out') n.newline() if platform.is_msvc(): n.rule('link', command='$cxx $in $libs /nologo /link $ldflags /out:$out', description='LINK $out') else: n.rule('link', command='$cxx $ldflags -o $out $in $libs', description='LINK $out') n.newline() objs = [] if platform.supports_ninja_browse(): n.comment('browse_py.h is used to inline browse.py.') n.rule('inline', command='"%s"' % src('inline.sh') + ' $varname < $in > $out', description='INLINE $out') n.build(built('browse_py.h'), 'inline', src('browse.py'), implicit=src('inline.sh'), variables=[('varname', 'kBrowsePy')]) n.newline() objs += cxx('browse', order_only=built('browse_py.h')) n.newline() n.comment('the depfile parser and ninja lexers are generated using re2c.') def has_re2c(): try: proc = subprocess.Popen(['re2c', '-V'], stdout=subprocess.PIPE) return int(proc.communicate()[0], 10) >= 1103 except OSError: return False if has_re2c(): n.rule('re2c', command='re2c -b -i --no-generation-date -o $out $in', description='RE2C $out') # Generate the .cc files in the source directory so we can check them in. n.build(src('depfile_parser.cc'), 're2c', src('depfile_parser.in.cc')) n.build(src('lexer.cc'), 're2c', src('lexer.in.cc')) else: print("warning: A compatible version of re2c (>= 0.11.3) was not found; " "changes to src/*.in.cc will not affect your build.") n.newline() n.comment('Core source files all build into ninja library.') for name in ['build', 'build_log', 'clean', 'clparser', 'debug_flags', 'depfile_parser', 'deps_log', 'disk_interface', 'edit_distance', 'eval_env', 'graph', 'graphviz', 'lexer', 'line_printer', 'manifest_parser', 'metrics', 'state', 'string_piece_util', 'util', 'version']: objs += cxx(name) if platform.is_windows(): for name in ['subprocess-win32', 'includes_normalize-win32', 'msvc_helper-win32', 'msvc_helper_main-win32']: objs += cxx(name) if platform.is_msvc(): objs += cxx('minidump-win32') objs += cc('getopt') else: objs += cxx('subprocess-posix') if platform.is_aix(): objs += cc('getopt') if platform.is_msvc(): ninja_lib = n.build(built('ninja.lib'), 'ar', objs) else: ninja_lib = n.build(built('libninja.a'), 'ar', objs) n.newline() if platform.is_msvc(): libs.append('ninja.lib') else: libs.append('-lninja') if platform.is_aix(): libs.append('-lperfstat') all_targets = [] n.comment('Main executable is library plus main() function.') objs = cxx('ninja') ninja = n.build(binary('ninja'), 'link', objs, implicit=ninja_lib, variables=[('libs', libs)]) n.newline() all_targets += ninja if options.bootstrap: # We've built the ninja binary. Don't run any more commands # through the bootstrap executor, but continue writing the # build.ninja file. 
n = ninja_writer n.comment('Tests all build into ninja_test executable.') objs = [] for name in ['build_log_test', 'build_test', 'clean_test', 'clparser_test', 'depfile_parser_test', 'deps_log_test', 'disk_interface_test', 'edit_distance_test', 'graph_test', 'lexer_test', 'manifest_parser_test', 'ninja_test', 'state_test', 'string_piece_util_test', 'subprocess_test', 'test', 'util_test']: objs += cxx(name) if platform.is_windows(): for name in ['includes_normalize_test', 'msvc_helper_test']: objs += cxx(name) ninja_test = n.build(binary('ninja_test'), 'link', objs, implicit=ninja_lib, variables=[('libs', libs)]) n.newline() all_targets += ninja_test n.comment('Ancillary executables.') for name in ['build_log_perftest', 'canon_perftest', 'depfile_parser_perftest', 'hash_collision_bench', 'manifest_parser_perftest', 'clparser_perftest']: objs = cxx(name) all_targets += n.build(binary(name), 'link', objs, implicit=ninja_lib, variables=[('libs', libs)]) n.newline() n.comment('Generate a graph using the "graph" tool.') n.rule('gendot', command='./ninja -t graph all > $out') n.rule('gengraph', command='dot -Tpng $in > $out') dot = n.build(built('graph.dot'), 'gendot', ['ninja', 'build.ninja']) n.build('graph.png', 'gengraph', dot) n.newline() n.comment('Generate the manual using asciidoc.') n.rule('asciidoc', command='asciidoc -b docbook -d book -o $out $in', description='ASCIIDOC $out') n.rule('xsltproc', command='xsltproc --nonet doc/docbook.xsl $in > $out', description='XSLTPROC $out') docbookxml = n.build(built('manual.xml'), 'asciidoc', doc('manual.asciidoc')) manual = n.build(doc('manual.html'), 'xsltproc', docbookxml, implicit=[doc('style.css'), doc('docbook.xsl')]) n.build('manual', 'phony', order_only=manual) n.newline() n.rule('dblatex', command='dblatex -q -o $out -p doc/dblatex.xsl $in', description='DBLATEX $out') n.build(doc('manual.pdf'), 'dblatex', docbookxml, implicit=[doc('dblatex.xsl')]) n.comment('Generate Doxygen.') n.rule('doxygen', command='doxygen $in', description='DOXYGEN $in') n.variable('doxygen_mainpage_generator', src('gen_doxygen_mainpage.sh')) n.rule('doxygen_mainpage', command='$doxygen_mainpage_generator $in > $out', description='DOXYGEN_MAINPAGE $out') mainpage = n.build(built('doxygen_mainpage'), 'doxygen_mainpage', ['README', 'COPYING'], implicit=['$doxygen_mainpage_generator']) n.build('doxygen', 'doxygen', doc('doxygen.config'), implicit=mainpage) n.newline() if not host.is_mingw(): n.comment('Regenerate build files if build script changes.') n.rule('configure', command='${configure_env}%s $root/configure.py $configure_args' % options.with_python, generator=True) n.build('build.ninja', 'configure', implicit=['$root/configure.py', os.path.normpath('$root/misc/ninja_syntax.py')]) n.newline() n.default(ninja) n.newline() if host.is_linux(): n.comment('Packaging') n.rule('rpmbuild', command="misc/packaging/rpmbuild.sh", description='Building rpms..') n.build('rpm', 'rpmbuild') n.newline() n.build('all', 'phony', all_targets) n.close() print('wrote %s.' % BUILD_FILENAME) if options.bootstrap: print('bootstrap complete. 
rebuilding...') rebuild_args = [] if platform.can_rebuild_in_place(): rebuild_args.append('./ninja') else: if platform.is_windows(): bootstrap_exe = 'ninja.bootstrap.exe' final_exe = 'ninja.exe' else: bootstrap_exe = './ninja.bootstrap' final_exe = './ninja' if os.path.exists(bootstrap_exe): os.unlink(bootstrap_exe) os.rename(final_exe, bootstrap_exe) rebuild_args.append(bootstrap_exe) if options.verbose: rebuild_args.append('-v') subprocess.check_call(rebuild_args)<|fim▁end|>
# Implement just enough of Ninja variable expansion etc. to # make the bootstrap build work.
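The completion above points at Bootstrap._expand, which delegates to ninja_syntax.expand. A rough sketch of what a minimal Ninja-style $var expander looks like; this approximates, and is not, the ninja_syntax implementation.

# Approximate sketch of minimal Ninja-style $var expansion; the real
# ninja_syntax.expand also handles escapes such as "$$".
import re

def expand(text, vars, local_vars={}):
    def lookup(match):
        name = match.group(1) or match.group(2)
        return local_vars.get(name, vars.get(name, ''))
    # Matches both $name and ${name}.
    return re.sub(r'\$(?:(\w+)|\{(\w+)\})', lookup, text)

assert expand('$cxx -c $in -o $out',
              {'cxx': 'g++'},
              {'in': 'a.cc', 'out': 'a.o'}) == 'g++ -c a.cc -o a.o'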
<|file_name|>common.go<|end_file_name|><|fim▁begin|>package service import ( "encoding/json" "log" "net/http" ) // StandardErrorRespModel ... type StandardErrorRespModel struct { ErrorMessage string `json:"error"` } // ----------------- // --- Generic --- // RespondWith ... func RespondWith(w http.ResponseWriter, httpStatusCode int, respModel interface{}) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(httpStatusCode) if err := json.NewEncoder(w).Encode(&respModel); err != nil { log.Println(" [!] Exception: RespondWith: Error: ", err)<|fim▁hole|>} // ----------------- // --- Successes --- // RespondWithSuccessOK ... func RespondWithSuccessOK(w http.ResponseWriter, respModel interface{}) { RespondWith(w, http.StatusOK, respModel) } // -------------- // --- Errors --- // RespondWithBadRequestError ... func RespondWithBadRequestError(w http.ResponseWriter, errMsg string) { RespondWithError(w, http.StatusBadRequest, errMsg) } // RespondWithNotFoundError ... func RespondWithNotFoundError(w http.ResponseWriter, errMsg string) { RespondWithError(w, http.StatusNotFound, errMsg) } // RespondWithError ... func RespondWithError(w http.ResponseWriter, httpErrCode int, errMsg string) { resp := StandardErrorRespModel{ ErrorMessage: errMsg, } RespondWithErrorJSON(w, httpErrCode, resp) } // RespondWithErrorJSON ... func RespondWithErrorJSON(w http.ResponseWriter, httpErrCode int, respModel interface{}) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(httpErrCode) if err := json.NewEncoder(w).Encode(&respModel); err != nil { log.Println(" [!] Exception: RespondWithErrorJSON: Error: ", err) } }<|fim▁end|>
}
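A client of the Go helpers above sees error bodies shaped by StandardErrorRespModel, i.e. a single "error" field. A hedged Python sketch of decoding such a response; the URL is a placeholder and `requests` is an assumed dependency.

# Sketch of decoding the JSON error body emitted by RespondWithError above.
import requests

resp = requests.get("http://localhost:8080/some-endpoint")  # placeholder URL
if resp.status_code >= 400:
    body = resp.json()
    # StandardErrorRespModel serializes as {"error": "<message>"}.
    print("service error:", body.get("error"))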
<|file_name|>error.rs<|end_file_name|><|fim▁begin|>use std; use std::fmt; use std::fmt::Display; use std::io; use std::num; use std::string; use serde::{de, ser}; /// Alias for a `Result` with the error type [`serde_osc::error::Error`]. /// /// [`serde_osc::error::Error`]: enum.Error.html pub type ResultE<T> = Result<T, Error>; /// Unified error type used in both serialization and deserialization. #[derive(Debug)] pub enum Error { /// User provided error message (via `serde::de::Error::custom`) Message(String), /// Unknown argument type (i.e. not a 'f'=f32, 'i'=i32, etc) UnsupportedType, /// Packet doesn't obey correct format; mismatched lengths, or /// attempt to read more arguments than were in the typestring (e.g.) BadFormat, /// OSC expects all data to be aligned to 4 bytes lengths. /// Likely violators of this are strings, especially those at the end of a packet. BadPadding, /// Error encountered due to `std::io::Read` Io(io::Error), /// Error converting between parsed type and what it represents. /// e.g. OSC spec uses i32 for lengths, which we cast to u64, but that could underflow. BadCast(num::TryFromIntError), /// We store ascii strings as UTF-8.<|fim▁hole|>} /// Conversion from `io::Error` for use with the `?` operator impl From<io::Error> for Error { fn from(e: io::Error) -> Self { Error::Io(e) } } /// Conversion from `num::TryFromIntError` for use with the `?` operator impl From<num::TryFromIntError> for Error { fn from(e: num::TryFromIntError) -> Self { Error::BadCast(e) } } /// Conversion from `string::FromUtf8Error` for use with the `?` operator impl From<string::FromUtf8Error> for Error { fn from(e: string::FromUtf8Error) -> Self { Error::StrParseError(e) } } impl Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Error::Message(ref msg) => write!(f, "serde_osc error: {}", msg), Error::UnsupportedType => write!(f, "Unsupported OSC type"), Error::BadFormat => write!(f, "Bad OSC packet format"), Error::BadPadding => write!(f, "OSC data not padded to 4-byte boundary"), Error::Io(ref err) => err.fmt(f), Error::BadCast(ref err) => err.fmt(f), Error::StrParseError(_) => write!(f, "OSC string contains illegal (non-ascii) characters"), } } } impl std::error::Error for Error { fn description(&self) -> &str { match *self { Error::Message(ref msg) => msg, Error::UnsupportedType => "Unsupported OSC type", Error::BadFormat => "OSC argument count mismatch", Error::BadPadding => "Incorrect OSC data padding", Error::Io(ref io_error) => io_error.description(), Error::BadCast(ref cast_error) => cast_error.description(), Error::StrParseError(ref utf_error) => utf_error.description(), } } fn cause(&self) -> Option<&std::error::Error> { match *self { Error::Io(ref io_error) => Some(io_error), Error::BadCast(ref cast_error) => Some(cast_error), Error::StrParseError(ref utf_error) => Some(utf_error), _ => None, } } } impl de::Error for Error { fn custom<T: Display>(msg: T) -> Self { Error::Message(msg.to_string()) } } impl ser::Error for Error { fn custom<T: Display>(msg: T) -> Self { Error::Message(msg.to_string()) } }<|fim▁end|>
/// Technically, this is safe, but if we received non-ASCII data, we could have invalid UTF-8.
    StrParseError(string::FromUtf8Error),
<|file_name|>detector.py<|end_file_name|><|fim▁begin|>""" Protocol Buffer Breaking Change Detector This tool is used to detect "breaking changes" in protobuf files, to ensure proper backwards-compatibility in protobuf API updates. The tool can check for breaking changes of a single API by taking 2 .proto file paths as input (before and after) and outputting a bool `is_breaking`. The breaking change detector creates a temporary directory, copies in each file to compute a protobuf "state", computes a diff of the "before" and "after" states, and runs the diff against a set of rules to determine if there was a breaking change. The tool is currently implemented with buf (https://buf.build/) """ from pathlib import Path from typing import List from tools.api_proto_breaking_change_detector.buf_utils import check_breaking, pull_buf_deps from tools.api_proto_breaking_change_detector.detector_errors import ChangeDetectorError class ProtoBreakingChangeDetector(object): """Abstract breaking change detector interface""" def run_detector(self) -> None: """Run the breaking change detector to detect rule violations This method should populate the detector's internal data such that `is_breaking` does not require any additional invocations to the breaking change detector. """ pass def is_breaking(self) -> bool: """Return True if breaking changes were detected in the given protos""" pass def get_breaking_changes(self) -> List[str]: """Return a list of strings containing breaking changes output by the tool""" pass class BufWrapper(ProtoBreakingChangeDetector): """Breaking change detector implemented with buf""" def __init__( self, path_to_changed_dir: str, git_ref: str, git_path: str, subdir: str = None, buf_path: str = None, config_file_loc: str = None, additional_args: List[str] = None) -> None: """Initialize the configuration of buf This function sets up any necessary config without actually running buf against any proto files. BufWrapper takes a path to a directory containing proto files as input, and it checks if these proto files break any changes from a given initial state. The initial state is input as a git ref. The constructor expects a git ref string, as well as an absolute path to a .git folder for the repository. Args: path_to_changed_dir {str} -- absolute path to a directory containing proto files in the after state buf_path {str} -- path to the buf binary (default: "buf") git_ref {str} -- git reference to use for the initial state of the protos (typically a commit hash) git_path {str} -- absolute path to .git folder for the repository of interest subdir {str} -- subdirectory within git repository from which to search for .proto files (default: None, e.g. 
stay in root) additional_args {List[str]} -- additional arguments passed into the buf binary invocations config_file_loc {str} -- absolute path to buf.yaml configuration file (if not provided, uses default buf configuration) """ if not Path(path_to_changed_dir).is_dir(): raise ValueError(f"path_to_changed_dir {path_to_changed_dir} is not a valid directory") if Path.cwd() not in Path(path_to_changed_dir).parents: raise ValueError( f"path_to_changed_dir {path_to_changed_dir} must be a subdirectory of the cwd ({ Path.cwd() })" ) if not Path(git_path).exists(): raise ChangeDetectorError(f'path to .git folder {git_path} does not exist') self._path_to_changed_dir = path_to_changed_dir self._additional_args = additional_args self._buf_path = buf_path or "buf" self._config_file_loc = config_file_loc self._git_ref = git_ref self._git_path = git_path self._subdir = subdir self._final_result = None pull_buf_deps( self._buf_path, self._path_to_changed_dir, config_file_loc=self._config_file_loc, additional_args=self._additional_args) def run_detector(self) -> None: self._final_result = check_breaking( self._buf_path, self._path_to_changed_dir, git_ref=self._git_ref, git_path=self._git_path, subdir=self._subdir, config_file_loc=self._config_file_loc, additional_args=self._additional_args) def is_breaking(self) -> bool: if not self._final_result: raise ChangeDetectorError("Must invoke run_detector() before checking if is_breaking()") final_code, final_out, final_err = self._final_result final_out, final_err = '\n'.join(final_out), '\n'.join(final_err) if final_err != "": raise ChangeDetectorError(f"Error from buf: {final_err}") <|fim▁hole|> return True if final_out != "": return True return False def get_breaking_changes(self) -> List[str]: _, final_out, _ = self._final_result return filter(lambda x: len(x) > 0, final_out) if self.is_breaking() else []<|fim▁end|>
if final_code != 0:
            return True
        if final_out != "":
            return True
        return False

    def get_breaking_changes(self) -> List[str]:
        _, final_out, _ = self._final_result
        # Wrap in list() so the return value matches the List[str] annotation
        # rather than being a lazy filter object.
        return list(filter(lambda x: len(x) > 0, final_out)) if self.is_breaking() else []<|fim▁end|>
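A hedged usage sketch for the BufWrapper detector above; all paths and the git ref are placeholders, and the script must run from a directory containing path_to_changed_dir, as the constructor enforces.

# Usage sketch for BufWrapper (placeholder paths and git ref).
detector = BufWrapper(
    path_to_changed_dir="/repo/api",   # protos in the "after" state
    git_ref="a1b2c3d",                 # commit for the "before" state
    git_path="/repo/.git",
    subdir="api",
)
detector.run_detector()
if detector.is_breaking():
    for change in detector.get_breaking_changes():
        print(change)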
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>//#![feature(plugin)] //#![plugin(bindgen_plugin)] //#[allow(dead_code, uppercase_variables, non_camel_case_types)] //#[plugin(bindgen_plugin)] //mod mysql_bindings { // bindgen!("/usr/include/mysql/mysql.h", match="mysql.h", link="mysql"); //} //use std::env; //use std::fs; //use std::path::Path; //use std::process::Command; extern crate libbindgen; use std::env; use std::path::Path; <|fim▁hole|> .header("cassandra.h") .use_core() .generate().unwrap() .write_to_file(Path::new("./src/").join("cassandra.rs")); if let Some(datastax_dir) = option_env!("CASSANDRA_SYS_LIB_PATH") { for p in datastax_dir.split(";") { println!("cargo:rustc-link-search={}", p); } } println!("cargo:rustc-flags=-l dylib=crypto"); println!("cargo:rustc-flags=-l dylib=ssl"); println!("cargo:rustc-flags=-l dylib=stdc++"); println!("cargo:rustc-flags=-l dylib=uv"); println!("cargo:rustc-link-search={}", "/usr/lib/x86_64-linux-gnu"); println!("cargo:rustc-link-search={}", "/usr/local/lib/x86_64-linux-gnu"); println!("cargo:rustc-link-search={}", "/usr/local/lib64"); println!("cargo:rustc-link-search={}", "/usr/local/lib"); println!("cargo:rustc-link-search={}", "/usr/lib64/"); println!("cargo:rustc-link-search={}", "/usr/lib/"); println!("cargo:rustc-link-lib=static=cassandra_static"); }<|fim▁end|>
fn main() { let _ = libbindgen::builder()
<|file_name|>test_custom_lists.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python from gi.repository import Gtk, GObject import time import unittest from testutils import setup_test_env setup_test_env() from softwarecenter.enums import XapianValues, ActionButtons TIMEOUT=300 class TestCustomLists(unittest.TestCase): def _debug(self, index, model, needle): print ("Expected '%s' at index '%s', " + "and custom list contained: '%s'") % ( needle, index, model[index][0].get_value(XapianValues.PKGNAME)) def assertPkgInListAtIndex(self, index, model, needle): doc = model[index][0] self.assertEqual(doc.get_value(XapianValues.PKGNAME), needle, self._debug(index, model, needle)) def test_custom_lists(self): from softwarecenter.ui.gtk3.panes.availablepane import get_test_window<|fim▁hole|> win = get_test_window() pane = win.get_data("pane") self._p() pane.on_search_terms_changed(None, "ark,artha,software-center") self._p() model = pane.app_view.tree_view.get_model() # custom list should return three items self.assertTrue(len(model) == 3) # check package names, ordering is default "by relevance" self.assertPkgInListAtIndex(0, model, "ark") self.assertPkgInListAtIndex(1, model, "software-center") self.assertPkgInListAtIndex(2, model, "artha") # check that the status bar offers to install the packages install_button = pane.action_bar.get_button(ActionButtons.INSTALL) self.assertNotEqual(install_button, None) GObject.timeout_add(TIMEOUT, lambda: win.destroy()) Gtk.main() def _p(self): for i in range(10): time.sleep(0.1) while Gtk.events_pending(): Gtk.main_iteration() if __name__ == "__main__": import logging logging.basicConfig(level=logging.INFO) unittest.main()<|fim▁end|>
<|file_name|>bench.rs<|end_file_name|><|fim▁begin|>#![feature(test)] extern crate fdlimit; extern crate test; extern crate tiny_http; use std::io::Write; use std::process::Command; use tiny_http::Method; #[test] #[ignore] // TODO: obtain time fn curl_bench() { let server = tiny_http::Server::http("0.0.0.0:0").unwrap(); let port = server.server_addr().port(); let num_requests = 10usize; match Command::new("curl") .arg("-s") .arg(format!("http://localhost:{}/?[1-{}]", port, num_requests)) .output() { Ok(p) => p, Err(_) => return, // ignoring test }; drop(server); } #[bench] fn sequential_requests(bencher: &mut test::Bencher) { let server = tiny_http::Server::http("0.0.0.0:0").unwrap(); let port = server.server_addr().port(); let mut stream = std::net::TcpStream::connect(("127.0.0.1", port)).unwrap(); bencher.iter(|| { (write!(stream, "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n")).unwrap(); let request = server.recv().unwrap(); assert_eq!(request.method(), &Method::Get); request.respond(tiny_http::Response::new_empty(tiny_http::StatusCode(204))); }); } #[bench] fn parallel_requests(bencher: &mut test::Bencher) { fdlimit::raise_fd_limit(); let server = tiny_http::Server::http("0.0.0.0:0").unwrap(); let port = server.server_addr().port(); bencher.iter(|| { let mut streams = Vec::new(); for _ in 0..1000usize { let mut stream = std::net::TcpStream::connect(("127.0.0.1", port)).unwrap(); (write!( stream, "GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n" )) .unwrap(); streams.push(stream); }<|fim▁hole|> Some(rq) => rq, }; assert_eq!(request.method(), &Method::Get); request.respond(tiny_http::Response::new_empty(tiny_http::StatusCode(204))); } }); }<|fim▁end|>
loop { let request = match server.try_recv().unwrap() { None => break,
<|file_name|>OmemoManager.java<|end_file_name|><|fim▁begin|>/** * * Copyright 2017 Paul Schaub, 2020 Florian Schmaus * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS,<|fim▁hole|> * limitations under the License. */ package org.jivesoftware.smackx.omemo; import static org.jivesoftware.smackx.omemo.util.OmemoConstants.OMEMO_NAMESPACE_V_AXOLOTL; import java.io.IOException; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Random; import java.util.Set; import java.util.SortedSet; import java.util.TreeMap; import java.util.WeakHashMap; import java.util.logging.Level; import java.util.logging.Logger; import org.jivesoftware.smack.ConnectionListener; import org.jivesoftware.smack.Manager; import org.jivesoftware.smack.SmackException; import org.jivesoftware.smack.SmackException.NotConnectedException; import org.jivesoftware.smack.XMPPConnection; import org.jivesoftware.smack.XMPPException; import org.jivesoftware.smack.packet.Message; import org.jivesoftware.smack.packet.MessageBuilder; import org.jivesoftware.smack.packet.Stanza; import org.jivesoftware.smack.util.Async; import org.jivesoftware.smackx.carbons.CarbonManager; import org.jivesoftware.smackx.carbons.packet.CarbonExtension; import org.jivesoftware.smackx.disco.ServiceDiscoveryManager; import org.jivesoftware.smackx.hints.element.StoreHint; import org.jivesoftware.smackx.mam.MamManager; import org.jivesoftware.smackx.muc.MultiUserChat; import org.jivesoftware.smackx.muc.MultiUserChatManager; import org.jivesoftware.smackx.muc.RoomInfo; import org.jivesoftware.smackx.omemo.element.OmemoBundleElement; import org.jivesoftware.smackx.omemo.element.OmemoDeviceListElement; import org.jivesoftware.smackx.omemo.element.OmemoDeviceListElement_VAxolotl; import org.jivesoftware.smackx.omemo.element.OmemoElement; import org.jivesoftware.smackx.omemo.exceptions.CannotEstablishOmemoSessionException; import org.jivesoftware.smackx.omemo.exceptions.CorruptedOmemoKeyException; import org.jivesoftware.smackx.omemo.exceptions.CryptoFailedException; import org.jivesoftware.smackx.omemo.exceptions.NoOmemoSupportException; import org.jivesoftware.smackx.omemo.exceptions.NoRawSessionException; import org.jivesoftware.smackx.omemo.exceptions.UndecidedOmemoIdentityException; import org.jivesoftware.smackx.omemo.internal.OmemoCachedDeviceList; import org.jivesoftware.smackx.omemo.internal.OmemoDevice; import org.jivesoftware.smackx.omemo.listener.OmemoMessageListener; import org.jivesoftware.smackx.omemo.listener.OmemoMucMessageListener; import org.jivesoftware.smackx.omemo.trust.OmemoFingerprint; import org.jivesoftware.smackx.omemo.trust.OmemoTrustCallback; import org.jivesoftware.smackx.omemo.trust.TrustState; import org.jivesoftware.smackx.omemo.util.MessageOrOmemoMessage; import org.jivesoftware.smackx.omemo.util.OmemoConstants; import org.jivesoftware.smackx.pep.PepEventListener; import org.jivesoftware.smackx.pep.PepManager; import org.jivesoftware.smackx.pubsub.PubSubException; import org.jivesoftware.smackx.pubsub.PubSubManager; import org.jivesoftware.smackx.pubsub.packet.PubSub; import 
org.jxmpp.jid.BareJid; import org.jxmpp.jid.DomainBareJid; import org.jxmpp.jid.EntityBareJid; import org.jxmpp.jid.EntityFullJid; /** * Manager that allows sending messages encrypted with OMEMO. * This class also provides some methods useful for a client that implements OMEMO. * * @author Paul Schaub */ public final class OmemoManager extends Manager { private static final Logger LOGGER = Logger.getLogger(OmemoManager.class.getName()); private static final Integer UNKNOWN_DEVICE_ID = -1; private static final WeakHashMap<XMPPConnection, TreeMap<Integer, OmemoManager>> INSTANCES = new WeakHashMap<>(); private final OmemoService<?, ?, ?, ?, ?, ?, ?, ?, ?> service; private final HashSet<OmemoMessageListener> omemoMessageListeners = new HashSet<>(); private final HashSet<OmemoMucMessageListener> omemoMucMessageListeners = new HashSet<>(); private final PepManager pepManager; private OmemoTrustCallback trustCallback; private BareJid ownJid; private Integer deviceId; /** * Private constructor. * * @param connection connection * @param deviceId deviceId */ private OmemoManager(XMPPConnection connection, Integer deviceId) { super(connection); service = OmemoService.getInstance(); pepManager = PepManager.getInstanceFor(connection); this.deviceId = deviceId; if (connection.isAuthenticated()) { initBareJidAndDeviceId(this); } else { connection.addConnectionListener(new ConnectionListener() { @Override public void authenticated(XMPPConnection connection, boolean resumed) { initBareJidAndDeviceId(OmemoManager.this); } }); } service.registerRatchetForManager(this); // StanzaListeners resumeStanzaAndPEPListeners(); } /** * Return an OmemoManager instance for the given connection and deviceId. * If there was an OmemoManager for the connection and id before, return it. Otherwise create a new OmemoManager * instance and return it. * * @param connection XmppConnection. * @param deviceId MUST NOT be null and MUST be greater than 0. * * @return OmemoManager instance for the given connection and deviceId. */ public static synchronized OmemoManager getInstanceFor(XMPPConnection connection, Integer deviceId) { if (deviceId == null || deviceId < 1) { throw new IllegalArgumentException("DeviceId MUST NOT be null and MUST be greater than 0."); } TreeMap<Integer, OmemoManager> managersOfConnection = INSTANCES.get(connection); if (managersOfConnection == null) { managersOfConnection = new TreeMap<>(); INSTANCES.put(connection, managersOfConnection); } OmemoManager manager = managersOfConnection.get(deviceId); if (manager == null) { manager = new OmemoManager(connection, deviceId); managersOfConnection.put(deviceId, manager); } return manager; } /** * Returns an OmemoManager instance for the given connection. If there was one manager for the connection before, * return it. If there were multiple managers before, return the one with the lowest deviceId. * If there was no manager before, return a new one. As soon as the connection gets authenticated, the manager * will look for local deviceIDs and select the lowest one as its id. If there are not local deviceIds, the manager * will assign itself a random id. * * @param connection XmppConnection. * * @return OmemoManager instance for the given connection and a determined deviceId. 
*/ public static synchronized OmemoManager getInstanceFor(XMPPConnection connection) { TreeMap<Integer, OmemoManager> managers = INSTANCES.get(connection); if (managers == null) { managers = new TreeMap<>(); INSTANCES.put(connection, managers); } OmemoManager manager; if (managers.size() == 0) { manager = new OmemoManager(connection, UNKNOWN_DEVICE_ID); managers.put(UNKNOWN_DEVICE_ID, manager); } else { manager = managers.get(managers.firstKey()); } return manager; } /** * Set a TrustCallback for this particular OmemoManager. * TrustCallbacks are used to query and modify trust decisions. * * @param callback trustCallback. */ public void setTrustCallback(OmemoTrustCallback callback) { if (trustCallback != null) { throw new IllegalStateException("TrustCallback can only be set once."); } trustCallback = callback; } /** * Return the TrustCallback of this manager. * * @return callback that is used for trust decisions. */ OmemoTrustCallback getTrustCallback() { return trustCallback; } /** * Initializes the OmemoManager. This method must be called before the manager can be used. * * @throws CorruptedOmemoKeyException if the OMEMO key is corrupted. * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException.NoResponseException if there was no response from the remote entity. * @throws SmackException.NotConnectedException if the XMPP connection is not connected. * @throws XMPPException.XMPPErrorException if there was an XMPP error returned. * @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated. * @throws PubSubException.NotALeafNodeException if a PubSub leaf node operation was attempted on a non-leaf node. * @throws IOException if an I/O error occurred. */ public synchronized void initialize() throws SmackException.NotLoggedInException, CorruptedOmemoKeyException, InterruptedException, SmackException.NoResponseException, SmackException.NotConnectedException, XMPPException.XMPPErrorException, PubSubException.NotALeafNodeException, IOException { if (!connection().isAuthenticated()) { throw new SmackException.NotLoggedInException(); } if (getTrustCallback() == null) { throw new IllegalStateException("No TrustCallback set."); } getOmemoService().init(new LoggedInOmemoManager(this)); } /** * Initialize the manager without blocking. Once the manager is successfully initialized, the finishedCallback will * be notified. It will also get notified, if an error occurs. * * @param finishedCallback callback that gets called once the manager is initialized. */ public void initializeAsync(final InitializationFinishedCallback finishedCallback) { Async.go(new Runnable() { @Override public void run() { try { initialize(); finishedCallback.initializationFinished(OmemoManager.this); } catch (Exception e) { finishedCallback.initializationFailed(e); } } }); } /** * Return a set of all OMEMO capable devices of a contact. * Note, that this method does not explicitly refresh the device list of the contact, so it might be outdated. * * @see #requestDeviceListUpdateFor(BareJid) * * @param contact contact we want to get a set of device of. * @return set of known devices of that contact. * * @throws IOException if an I/O error occurred. 
*/ public Set<OmemoDevice> getDevicesOf(BareJid contact) throws IOException { OmemoCachedDeviceList list = getOmemoService().getOmemoStoreBackend().loadCachedDeviceList(getOwnDevice(), contact); HashSet<OmemoDevice> devices = new HashSet<>(); for (int deviceId : list.getActiveDevices()) { devices.add(new OmemoDevice(contact, deviceId)); } return devices; } /** * OMEMO encrypt a cleartext message for a single recipient. * Note that this method does NOT set the 'to' attribute of the message. * * @param recipient recipients bareJid * @param message text to encrypt * @return encrypted message * * @throws CryptoFailedException when something crypto related fails * @throws UndecidedOmemoIdentityException When there are undecided devices * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException.NotConnectedException if the XMPP connection is not connected. * @throws SmackException.NoResponseException if there was no response from the remote entity. * @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated. * @throws IOException if an I/O error occurred. */ public OmemoMessage.Sent encrypt(BareJid recipient, String message) throws CryptoFailedException, UndecidedOmemoIdentityException, InterruptedException, SmackException.NotConnectedException, SmackException.NoResponseException, SmackException.NotLoggedInException, IOException { Set<BareJid> recipients = new HashSet<>(); recipients.add(recipient); return encrypt(recipients, message); } /** * OMEMO encrypt a cleartext message for multiple recipients. * * @param recipients recipients barejids * @param message text to encrypt * @return encrypted message. * * @throws CryptoFailedException When something crypto related fails * @throws UndecidedOmemoIdentityException When there are undecided devices. * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException.NotConnectedException if the XMPP connection is not connected. * @throws SmackException.NoResponseException if there was no response from the remote entity. * @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated. * @throws IOException if an I/O error occurred. */ public synchronized OmemoMessage.Sent encrypt(Set<BareJid> recipients, String message) throws CryptoFailedException, UndecidedOmemoIdentityException, InterruptedException, SmackException.NotConnectedException, SmackException.NoResponseException, SmackException.NotLoggedInException, IOException { LoggedInOmemoManager guard = new LoggedInOmemoManager(this); Set<OmemoDevice> devices = getDevicesOf(getOwnJid()); for (BareJid recipient : recipients) { devices.addAll(getDevicesOf(recipient)); } return service.createOmemoMessage(guard, devices, message); } /** * Encrypt a message for all recipients in the MultiUserChat. * * @param muc multiUserChat * @param message message to send * @return encrypted message * * @throws UndecidedOmemoIdentityException when there are undecided devices. * @throws CryptoFailedException if the OMEMO cryptography failed. * @throws XMPPException.XMPPErrorException if there was an XMPP error returned. * @throws SmackException.NotConnectedException if the XMPP connection is not connected. * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException.NoResponseException if there was no response from the remote entity. * @throws NoOmemoSupportException When the muc doesn't support OMEMO. 
* @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated. * @throws IOException if an I/O error occurred. */ public synchronized OmemoMessage.Sent encrypt(MultiUserChat muc, String message) throws UndecidedOmemoIdentityException, CryptoFailedException, XMPPException.XMPPErrorException, SmackException.NotConnectedException, InterruptedException, SmackException.NoResponseException, NoOmemoSupportException, SmackException.NotLoggedInException, IOException { if (!multiUserChatSupportsOmemo(muc)) { throw new NoOmemoSupportException(); } Set<BareJid> recipients = new HashSet<>(); for (EntityFullJid e : muc.getOccupants()) { recipients.add(muc.getOccupant(e).getJid().asBareJid()); } return encrypt(recipients, message); } /** * Manually decrypt an OmemoElement. * This method should only be used for use-cases, where the internal listeners don't pick up on an incoming message. * (for example MAM query results). * * @param sender bareJid of the message sender (must be the jid of the contact who sent the message) * @param omemoElement omemoElement * @return decrypted OmemoMessage * * @throws SmackException.NotLoggedInException if the Manager is not authenticated * @throws CorruptedOmemoKeyException if our or their key is corrupted * @throws NoRawSessionException if the message was not a preKeyMessage, but we had no session with the contact * @throws CryptoFailedException if decryption fails * @throws IOException if an I/O error occurred. */ public OmemoMessage.Received decrypt(BareJid sender, OmemoElement omemoElement) throws SmackException.NotLoggedInException, CorruptedOmemoKeyException, NoRawSessionException, CryptoFailedException, IOException { LoggedInOmemoManager managerGuard = new LoggedInOmemoManager(this); return getOmemoService().decryptMessage(managerGuard, sender, omemoElement); } /** * Decrypt messages from a MAM query. * * @param mamQuery The MAM query * @return list of decrypted OmemoMessages * * @throws SmackException.NotLoggedInException if the Manager is not authenticated. * @throws IOException if an I/O error occurred. */ public List<MessageOrOmemoMessage> decryptMamQueryResult(MamManager.MamQuery mamQuery) throws SmackException.NotLoggedInException, IOException { return new ArrayList<>(getOmemoService().decryptMamQueryResult(new LoggedInOmemoManager(this), mamQuery)); } /** * Trust that a fingerprint belongs to an OmemoDevice. * The fingerprint must be the lowercase, hexadecimal fingerprint of the identityKey of the device and must * be of length 64. * * @param device device * @param fingerprint fingerprint */ public void trustOmemoIdentity(OmemoDevice device, OmemoFingerprint fingerprint) { if (trustCallback == null) { throw new IllegalStateException("No TrustCallback set."); } trustCallback.setTrust(device, fingerprint, TrustState.trusted); } /** * Distrust the fingerprint/OmemoDevice tuple. * The fingerprint must be the lowercase, hexadecimal fingerprint of the identityKey of the device and must * be of length 64. * * @param device device * @param fingerprint fingerprint */ public void distrustOmemoIdentity(OmemoDevice device, OmemoFingerprint fingerprint) { if (trustCallback == null) { throw new IllegalStateException("No TrustCallback set."); } trustCallback.setTrust(device, fingerprint, TrustState.untrusted); } /** * Returns true, if the fingerprint/OmemoDevice tuple is trusted, otherwise false. * The fingerprint must be the lowercase, hexadecimal fingerprint of the identityKey of the device and must * be of length 64. 
* * @param device device * @param fingerprint fingerprint * @return <code>true</code> if this is a trusted OMEMO identity. */ public boolean isTrustedOmemoIdentity(OmemoDevice device, OmemoFingerprint fingerprint) { if (trustCallback == null) { throw new IllegalStateException("No TrustCallback set."); } return trustCallback.getTrust(device, fingerprint) == TrustState.trusted; } /** * Returns true, if the fingerprint/OmemoDevice tuple is decided by the user. * The fingerprint must be the lowercase, hexadecimal fingerprint of the identityKey of the device and must * be of length 64. * * @param device device * @param fingerprint fingerprint * @return <code>true</code> if the trust is decided for the identity. */ public boolean isDecidedOmemoIdentity(OmemoDevice device, OmemoFingerprint fingerprint) { if (trustCallback == null) { throw new IllegalStateException("No TrustCallback set."); } return trustCallback.getTrust(device, fingerprint) != TrustState.undecided; } /** * Send a ratchet update message. This can be used to advance the ratchet of a session in order to maintain forward * secrecy. * * @param recipient recipient * * @throws CorruptedOmemoKeyException When the used identityKeys are corrupted * @throws CryptoFailedException When something fails with the crypto * @throws CannotEstablishOmemoSessionException When we can't establish a session with the recipient * @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated. * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException.NoResponseException if there was no response from the remote entity. * @throws NoSuchAlgorithmException if no such algorithm is available. * @throws SmackException.NotConnectedException if the XMPP connection is not connected. * @throws IOException if an I/O error occurred. */ public synchronized void sendRatchetUpdateMessage(OmemoDevice recipient) throws SmackException.NotLoggedInException, CorruptedOmemoKeyException, InterruptedException, SmackException.NoResponseException, NoSuchAlgorithmException, SmackException.NotConnectedException, CryptoFailedException, CannotEstablishOmemoSessionException, IOException { XMPPConnection connection = connection(); MessageBuilder message = connection.getStanzaFactory() .buildMessageStanza() .to(recipient.getJid()); OmemoElement element = getOmemoService().createRatchetUpdateElement(new LoggedInOmemoManager(this), recipient); message.addExtension(element); // Set MAM Storage hint StoreHint.set(message); connection.sendStanza(message.build()); } /** * Returns true, if the contact has any active devices published in a deviceList. * * @param contact contact * @return true if contact has at least one OMEMO capable device. * * @throws SmackException.NotConnectedException if the XMPP connection is not connected. * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException.NoResponseException if there was no response from the remote entity. * @throws PubSubException.NotALeafNodeException if a PubSub leaf node operation was attempted on a non-leaf node. * @throws XMPPException.XMPPErrorException if there was an XMPP error returned. * @throws IOException if an I/O error occurred. 
*/ public synchronized boolean contactSupportsOmemo(BareJid contact) throws InterruptedException, PubSubException.NotALeafNodeException, XMPPException.XMPPErrorException, SmackException.NotConnectedException, SmackException.NoResponseException, IOException { OmemoCachedDeviceList deviceList = getOmemoService().refreshDeviceList(connection(), getOwnDevice(), contact); return !deviceList.getActiveDevices().isEmpty(); } /** * Returns true, if the MUC with the EntityBareJid multiUserChat is non-anonymous and members only (prerequisite * for OMEMO encryption in MUC). * * @param multiUserChat MUC * @return true if chat supports OMEMO * * @throws XMPPException.XMPPErrorException if there was an XMPP protocol level error * @throws SmackException.NotConnectedException if the connection is not connected * @throws InterruptedException if the thread is interrupted * @throws SmackException.NoResponseException if the server does not respond */ public boolean multiUserChatSupportsOmemo(MultiUserChat multiUserChat) throws XMPPException.XMPPErrorException, SmackException.NotConnectedException, InterruptedException, SmackException.NoResponseException { EntityBareJid jid = multiUserChat.getRoom(); RoomInfo roomInfo = MultiUserChatManager.getInstanceFor(connection()).getRoomInfo(jid); return roomInfo.isNonanonymous() && roomInfo.isMembersOnly(); } /** * Returns true, if the Server supports PEP. * * @param connection XMPPConnection * @param server domainBareJid of the server to test * @return true if server supports pep * * @throws XMPPException.XMPPErrorException if there was an XMPP error returned. * @throws SmackException.NotConnectedException if the XMPP connection is not connected. * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException.NoResponseException if there was no response from the remote entity. */ public static boolean serverSupportsOmemo(XMPPConnection connection, DomainBareJid server) throws XMPPException.XMPPErrorException, SmackException.NotConnectedException, InterruptedException, SmackException.NoResponseException { return ServiceDiscoveryManager.getInstanceFor(connection) .discoverInfo(server).containsFeature(PubSub.NAMESPACE); } /** * Return the fingerprint of our identity key. * * @return our own OMEMO fingerprint * * @throws SmackException.NotLoggedInException if we don't know our bareJid yet. * @throws CorruptedOmemoKeyException if our identityKey is corrupted. * @throws IOException if an I/O error occurred. */ public synchronized OmemoFingerprint getOwnFingerprint() throws SmackException.NotLoggedInException, CorruptedOmemoKeyException, IOException { if (getOwnJid() == null) { throw new SmackException.NotLoggedInException(); } return getOmemoService().getOmemoStoreBackend().getFingerprint(getOwnDevice()); } /** * Get the fingerprint of a contacts device. * * @param device contacts OmemoDevice * @return fingerprint of the given OMEMO device. * * @throws CannotEstablishOmemoSessionException if we have no session yet, and are unable to create one. * @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated. * @throws CorruptedOmemoKeyException if the copy of the fingerprint we have is corrupted. * @throws SmackException.NotConnectedException if the XMPP connection is not connected. * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException.NoResponseException if there was no response from the remote entity. * @throws IOException if an I/O error occurred. 
*/ public synchronized OmemoFingerprint getFingerprint(OmemoDevice device) throws CannotEstablishOmemoSessionException, SmackException.NotLoggedInException, CorruptedOmemoKeyException, SmackException.NotConnectedException, InterruptedException, SmackException.NoResponseException, IOException { if (getOwnJid() == null) { throw new SmackException.NotLoggedInException(); } if (device.equals(getOwnDevice())) { return getOwnFingerprint(); } return getOmemoService().getOmemoStoreBackend() .getFingerprintAndMaybeBuildSession(new LoggedInOmemoManager(this), device); } /** * Return all OmemoFingerprints of active devices of a contact. * TODO: Make more fail-safe * * @param contact contact * @return Map of all active devices of the contact and their fingerprints. * * @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated. * @throws CorruptedOmemoKeyException if the OMEMO key is corrupted. * @throws CannotEstablishOmemoSessionException if no OMEMO session could be established. * @throws SmackException.NotConnectedException if the XMPP connection is not connected. * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException.NoResponseException if there was no response from the remote entity. * @throws IOException if an I/O error occurred. */ public synchronized HashMap<OmemoDevice, OmemoFingerprint> getActiveFingerprints(BareJid contact) throws SmackException.NotLoggedInException, CorruptedOmemoKeyException, CannotEstablishOmemoSessionException, SmackException.NotConnectedException, InterruptedException, SmackException.NoResponseException, IOException { if (getOwnJid() == null) { throw new SmackException.NotLoggedInException(); } HashMap<OmemoDevice, OmemoFingerprint> fingerprints = new HashMap<>(); OmemoCachedDeviceList deviceList = getOmemoService().getOmemoStoreBackend().loadCachedDeviceList(getOwnDevice(), contact); for (int id : deviceList.getActiveDevices()) { OmemoDevice device = new OmemoDevice(contact, id); OmemoFingerprint fingerprint = getFingerprint(device); if (fingerprint != null) { fingerprints.put(device, fingerprint); } } return fingerprints; } /** * Add an OmemoMessageListener. This listener will be informed about incoming OMEMO messages * (as well as KeyTransportMessages) and OMEMO encrypted message carbons. * * @param listener OmemoMessageListener */ public void addOmemoMessageListener(OmemoMessageListener listener) { omemoMessageListeners.add(listener); } /** * Remove an OmemoMessageListener. * * @param listener OmemoMessageListener */ public void removeOmemoMessageListener(OmemoMessageListener listener) { omemoMessageListeners.remove(listener); } /** * Add an OmemoMucMessageListener. This listener will be informed about incoming OMEMO encrypted MUC messages. * * @param listener OmemoMessageListener. */ public void addOmemoMucMessageListener(OmemoMucMessageListener listener) { omemoMucMessageListeners.add(listener); } /** * Remove an OmemoMucMessageListener. * * @param listener OmemoMucMessageListener */ public void removeOmemoMucMessageListener(OmemoMucMessageListener listener) { omemoMucMessageListeners.remove(listener); } /** * Request a deviceList update from contact contact. * * @param contact contact we want to obtain the deviceList from. * * @throws InterruptedException if the calling thread was interrupted. * @throws PubSubException.NotALeafNodeException if a PubSub leaf node operation was attempted on a non-leaf node. * @throws XMPPException.XMPPErrorException if there was an XMPP error returned. 
* @throws SmackException.NotConnectedException if the XMPP connection is not connected. * @throws SmackException.NoResponseException if there was no response from the remote entity. * @throws IOException if an I/O error occurred. */ public synchronized void requestDeviceListUpdateFor(BareJid contact) throws InterruptedException, PubSubException.NotALeafNodeException, XMPPException.XMPPErrorException, SmackException.NotConnectedException, SmackException.NoResponseException, IOException { getOmemoService().refreshDeviceList(connection(), getOwnDevice(), contact); } /** * Publish a new device list with just our own deviceId in it. * * @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated. * @throws InterruptedException if the calling thread was interrupted. * @throws XMPPException.XMPPErrorException if there was an XMPP error returned. * @throws SmackException.NotConnectedException if the XMPP connection is not connected. * @throws SmackException.NoResponseException if there was no response from the remote entity. * @throws IOException if an I/O error occurred. * @throws PubSubException.NotALeafNodeException if a PubSub leaf node operation was attempted on a non-leaf node. */ public void purgeDeviceList() throws SmackException.NotLoggedInException, InterruptedException, XMPPException.XMPPErrorException, SmackException.NotConnectedException, SmackException.NoResponseException, IOException, PubSubException.NotALeafNodeException { getOmemoService().purgeDeviceList(new LoggedInOmemoManager(this)); } public List<Exception> purgeEverything() throws NotConnectedException, InterruptedException, IOException { List<Exception> exceptions = new ArrayList<>(5); PubSubManager pm = PubSubManager.getInstanceFor(getConnection(), getOwnJid()); try { requestDeviceListUpdateFor(getOwnJid()); } catch (SmackException.NoResponseException | PubSubException.NotALeafNodeException | XMPPException.XMPPErrorException e) { exceptions.add(e); } OmemoCachedDeviceList deviceList = OmemoService.getInstance().getOmemoStoreBackend() .loadCachedDeviceList(getOwnDevice(), getOwnJid()); for (int id : deviceList.getAllDevices()) { try { pm.getLeafNode(OmemoConstants.PEP_NODE_BUNDLE_FROM_DEVICE_ID(id)).deleteAllItems(); } catch (SmackException.NoResponseException | PubSubException.NotALeafNodeException | XMPPException.XMPPErrorException | PubSubException.NotAPubSubNodeException e) { exceptions.add(e); } try { pm.deleteNode(OmemoConstants.PEP_NODE_BUNDLE_FROM_DEVICE_ID(id)); } catch (SmackException.NoResponseException | XMPPException.XMPPErrorException e) { exceptions.add(e); } } try { pm.getLeafNode(OmemoConstants.PEP_NODE_DEVICE_LIST).deleteAllItems(); } catch (SmackException.NoResponseException | PubSubException.NotALeafNodeException | XMPPException.XMPPErrorException | PubSubException.NotAPubSubNodeException e) { exceptions.add(e); } try { pm.deleteNode(OmemoConstants.PEP_NODE_DEVICE_LIST); } catch (SmackException.NoResponseException | XMPPException.XMPPErrorException e) { exceptions.add(e); } return exceptions; } /** * Rotate the signedPreKey published in our OmemoBundle and republish it. This should be done every now and * then (7-14 days). The old signedPreKey should be kept for some more time (a month or so) to enable decryption * of messages that have been sent since the key was changed. * * @throws CorruptedOmemoKeyException When the IdentityKeyPair is damaged. 
* @throws InterruptedException XMPP error * @throws XMPPException.XMPPErrorException XMPP error * @throws SmackException.NotConnectedException XMPP error * @throws SmackException.NoResponseException XMPP error * @throws SmackException.NotLoggedInException if the XMPP connection is not authenticated. * @throws IOException if an I/O error occurred. * @throws PubSubException.NotALeafNodeException if a PubSub leaf node operation was attempted on a non-leaf node. */ public synchronized void rotateSignedPreKey() throws CorruptedOmemoKeyException, SmackException.NotLoggedInException, XMPPException.XMPPErrorException, SmackException.NotConnectedException, InterruptedException, SmackException.NoResponseException, IOException, PubSubException.NotALeafNodeException { if (!connection().isAuthenticated()) { throw new SmackException.NotLoggedInException(); } // generate key getOmemoService().getOmemoStoreBackend().changeSignedPreKey(getOwnDevice()); // publish OmemoBundleElement bundle = getOmemoService().getOmemoStoreBackend().packOmemoBundle(getOwnDevice()); OmemoService.publishBundle(connection(), getOwnDevice(), bundle); } /** * Return true, if the given Stanza contains an OMEMO element 'encrypted'. * * @param stanza stanza * @return true if stanza has extension 'encrypted' */ static boolean stanzaContainsOmemoElement(Stanza stanza) { return stanza.hasExtension(OmemoElement.NAME_ENCRYPTED, OMEMO_NAMESPACE_V_AXOLOTL); } /** * Throw an IllegalStateException if no OmemoService is set. */ private void throwIfNoServiceSet() { if (service == null) { throw new IllegalStateException("No OmemoService set in OmemoManager."); } } /** * Returns a pseudo random number from the interval [1, Integer.MAX_VALUE]. * * @return a random deviceId. */ public static int randomDeviceId() { return new Random().nextInt(Integer.MAX_VALUE - 1) + 1; } /** * Return the BareJid of the user. * * @return our own bare JID. */ public BareJid getOwnJid() { if (ownJid == null && connection().isAuthenticated()) { ownJid = connection().getUser().asBareJid(); } return ownJid; } /** * Return the deviceId of this OmemoManager. * * @return this OmemoManagers deviceId. */ public synchronized Integer getDeviceId() { return deviceId; } /** * Return the OmemoDevice of the user. * * @return our own OmemoDevice */ public synchronized OmemoDevice getOwnDevice() { BareJid jid = getOwnJid(); if (jid == null) { return null; } return new OmemoDevice(jid, getDeviceId()); } /** * Set the deviceId of the manager to nDeviceId. * * @param nDeviceId new deviceId */ synchronized void setDeviceId(int nDeviceId) { // Move this instance inside the HashMaps INSTANCES.get(connection()).remove(getDeviceId()); INSTANCES.get(connection()).put(nDeviceId, this); this.deviceId = nDeviceId; } /** * Notify all registered OmemoMessageListeners about a received OmemoMessage. * * @param stanza original stanza * @param decryptedMessage decrypted OmemoMessage. */ void notifyOmemoMessageReceived(Stanza stanza, OmemoMessage.Received decryptedMessage) { for (OmemoMessageListener l : omemoMessageListeners) { l.onOmemoMessageReceived(stanza, decryptedMessage); } } /** * Notify all registered OmemoMucMessageListeners of an incoming OmemoMessageElement in a MUC. * * @param muc MultiUserChat the message was received in. * @param stanza Original Stanza. * @param decryptedMessage Decrypted OmemoMessage. 
*/ void notifyOmemoMucMessageReceived(MultiUserChat muc, Stanza stanza, OmemoMessage.Received decryptedMessage) { for (OmemoMucMessageListener l : omemoMucMessageListeners) { l.onOmemoMucMessageReceived(muc, stanza, decryptedMessage); } } /** * Notify all registered OmemoMessageListeners of an incoming OMEMO encrypted Carbon Copy. * Remember: If you want to receive OMEMO encrypted carbon copies, you have to enable carbons using * {@link CarbonManager#enableCarbons()}. * * @param direction direction of the carbon copy * @param carbonCopy carbon copy itself * @param wrappingMessage wrapping message * @param decryptedCarbonCopy decrypted carbon copy OMEMO element */ void notifyOmemoCarbonCopyReceived(CarbonExtension.Direction direction, Message carbonCopy, Message wrappingMessage, OmemoMessage.Received decryptedCarbonCopy) { for (OmemoMessageListener l : omemoMessageListeners) { l.onOmemoCarbonCopyReceived(direction, carbonCopy, wrappingMessage, decryptedCarbonCopy); } } /** * Register stanza listeners needed for OMEMO. * This method is called automatically in the constructor and should only be used to restore the previous state * after {@link #stopStanzaAndPEPListeners()} was called. */ public void resumeStanzaAndPEPListeners() { CarbonManager carbonManager = CarbonManager.getInstanceFor(connection()); // Remove listeners to avoid them getting added twice connection().removeAsyncStanzaListener(this::internalOmemoMessageStanzaListener); carbonManager.removeCarbonCopyReceivedListener(this::internalOmemoCarbonCopyListener); // Add listeners pepManager.addPepEventListener(OmemoConstants.PEP_NODE_DEVICE_LIST, OmemoDeviceListElement.class, pepOmemoDeviceListEventListener); connection().addAsyncStanzaListener(this::internalOmemoMessageStanzaListener, OmemoManager::isOmemoMessage); carbonManager.addCarbonCopyReceivedListener(this::internalOmemoCarbonCopyListener); } /** * Remove active stanza listeners needed for OMEMO. */ public void stopStanzaAndPEPListeners() { pepManager.removePepEventListener(pepOmemoDeviceListEventListener); connection().removeAsyncStanzaListener(this::internalOmemoMessageStanzaListener); CarbonManager.getInstanceFor(connection()).removeCarbonCopyReceivedListener(this::internalOmemoCarbonCopyListener); } /** * Build a fresh session with a contacts device. * This might come in handy if a session is broken. * * @param contactsDevice OmemoDevice of a contact. * * @throws InterruptedException if the calling thread was interrupted. * @throws SmackException.NoResponseException if there was no response from the remote entity. * @throws CorruptedOmemoKeyException if our or their identityKey is corrupted. * @throws SmackException.NotConnectedException if the XMPP connection is not connected. * @throws CannotEstablishOmemoSessionException if no new session can be established. * @throws SmackException.NotLoggedInException if the connection is not authenticated. */ public void rebuildSessionWith(OmemoDevice contactsDevice) throws InterruptedException, SmackException.NoResponseException, CorruptedOmemoKeyException, SmackException.NotConnectedException, CannotEstablishOmemoSessionException, SmackException.NotLoggedInException { if (!connection().isAuthenticated()) { throw new SmackException.NotLoggedInException(); } getOmemoService().buildFreshSessionWithDevice(connection(), getOwnDevice(), contactsDevice); } /** * Get our connection. * * @return the connection of this manager */ XMPPConnection getConnection() { return connection(); } /** * Return the OMEMO service object. 
* * @return the OmemoService object related to this OmemoManager. */ OmemoService<?, ?, ?, ?, ?, ?, ?, ?, ?> getOmemoService() { throwIfNoServiceSet(); return service; } /** * StanzaListener that listens for incoming Stanzas which contain OMEMO elements. */ private void internalOmemoMessageStanzaListener(final Stanza packet) { Async.go(new Runnable() { @Override public void run() { try { getOmemoService().onOmemoMessageStanzaReceived(packet, new LoggedInOmemoManager(OmemoManager.this)); } catch (SmackException.NotLoggedInException | IOException e) { LOGGER.log(Level.SEVERE, "Exception while processing OMEMO stanza", e); } } }); } /** * CarbonCopyListener that listens for incoming carbon copies which contain OMEMO elements. */ private void internalOmemoCarbonCopyListener(final CarbonExtension.Direction direction, final Message carbonCopy, final Message wrappingMessage) { Async.go(new Runnable() { @Override public void run() { if (isOmemoMessage(carbonCopy)) { try { getOmemoService().onOmemoCarbonCopyReceived(direction, carbonCopy, wrappingMessage, new LoggedInOmemoManager(OmemoManager.this)); } catch (SmackException.NotLoggedInException | IOException e) { LOGGER.log(Level.SEVERE, "Exception while processing OMEMO stanza", e); } } } }); } @SuppressWarnings("UnnecessaryLambda") private final PepEventListener<OmemoDeviceListElement> pepOmemoDeviceListEventListener = (from, receivedDeviceList, id, message) -> { // Device List <list> OmemoCachedDeviceList deviceList; try { getOmemoService().getOmemoStoreBackend().mergeCachedDeviceList(getOwnDevice(), from, receivedDeviceList); if (!from.asBareJid().equals(getOwnJid())) { return; } deviceList = getOmemoService().cleanUpDeviceList(getOwnDevice()); } catch (IOException e) { LOGGER.log(Level.SEVERE, "IOException while processing OMEMO PEP device updates. Message: " + message, e); return; } final OmemoDeviceListElement_VAxolotl newDeviceList = new OmemoDeviceListElement_VAxolotl(deviceList); if (!newDeviceList.copyDeviceIds().equals(receivedDeviceList.copyDeviceIds())) { LOGGER.log(Level.FINE, "Republish deviceList due to changes:" + " Received: " + Arrays.toString(receivedDeviceList.copyDeviceIds().toArray()) + " Published: " + Arrays.toString(newDeviceList.copyDeviceIds().toArray())); Async.go(new Runnable() { @Override public void run() { try { OmemoService.publishDeviceList(connection(), newDeviceList); } catch (InterruptedException | XMPPException.XMPPErrorException | SmackException.NotConnectedException | SmackException.NoResponseException | PubSubException.NotALeafNodeException e) { LOGGER.log(Level.WARNING, "Could not publish our deviceList upon an received update.", e); } } }); } }; /** * StanzaFilter that filters messages containing a OMEMO element. */ private static boolean isOmemoMessage(Stanza stanza) { return stanza instanceof Message && OmemoManager.stanzaContainsOmemoElement(stanza); } /** * Guard class which ensures that the wrapped OmemoManager knows its BareJid. 
*/ public static class LoggedInOmemoManager { private final OmemoManager manager; public LoggedInOmemoManager(OmemoManager manager) throws SmackException.NotLoggedInException { if (manager == null) { throw new IllegalArgumentException("OmemoManager cannot be null."); } if (manager.getOwnJid() == null) { if (manager.getConnection().isAuthenticated()) { manager.ownJid = manager.getConnection().getUser().asBareJid(); } else { throw new SmackException.NotLoggedInException(); } } this.manager = manager; } public OmemoManager get() { return manager; } } /** * Callback which can be used to get notified, when the OmemoManager finished initializing. */ public interface InitializationFinishedCallback { void initializationFinished(OmemoManager manager); void initializationFailed(Exception cause); } /** * Get the bareJid of the user from the authenticated XMPP connection. * If our deviceId is unknown, use the bareJid to look up deviceIds available in the omemoStore. * If there are ids available, choose the smallest one. Otherwise generate a random deviceId. * * @param manager OmemoManager */ private static void initBareJidAndDeviceId(OmemoManager manager) { if (!manager.getConnection().isAuthenticated()) { throw new IllegalStateException("Connection MUST be authenticated."); } if (manager.ownJid == null) { manager.ownJid = manager.getConnection().getUser().asBareJid(); } if (UNKNOWN_DEVICE_ID.equals(manager.deviceId)) { SortedSet<Integer> storedDeviceIds = manager.getOmemoService().getOmemoStoreBackend().localDeviceIdsOf(manager.ownJid); if (storedDeviceIds.size() > 0) { manager.setDeviceId(storedDeviceIds.first()); } else { manager.setDeviceId(randomDeviceId()); } } } }<|fim▁end|>
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and
<|file_name|>PhysicalTopology.java<|end_file_name|><|fim▁begin|>/* * To change this template, choose Tools | Templates * and open the template in the editor. */ package ons; import ons.util.WeightedGraph; import org.w3c.dom.*; /** * The physical topology of a network refers to he physical layout of devices on * a network, or to the way that the devices on a network are arranged and how * they communicate with each other. * * @author andred */ public abstract class PhysicalTopology { protected int nodes; protected int links; protected OXC[] nodeVector; protected Link[] linkVector; protected Link[][] adjMatrix; /** * Creates a new PhysicalTopology object. Takes the XML file containing all * the information about the simulation environment and uses it to populate * the PhysicalTopology object. The physical topology is basically composed * of nodes connected by links, each supporting different wavelengths. * * @param xml file that contains the simulation environment information */ public PhysicalTopology(Element xml) { try { if (Simulator.verbose) { System.out.println(xml.getAttribute("name")); } } catch (Throwable t) { t.printStackTrace(); } } /** * Retrieves the number of nodes in a given PhysicalTopology. * * @return the value of the PhysicalTopology's nodes attribute */ public int getNumNodes() { return nodes; } /** * Retrieves the number of links in a given PhysicalTopology. * * @return number of items in the PhysicalTopology's linkVector attribute<|fim▁hole|> return linkVector.length; } /** * Retrieves a specific node in the PhysicalTopology object. * * @param id the node's unique identifier * @return specified node from the PhysicalTopology's nodeVector */ public OXC getNode(int id) { return nodeVector[id]; } /** * Retrieves a specific link in the PhysicalTopology object, based on its * unique identifier. * * @param linkid the link's unique identifier * @return specified link from the PhysicalTopology's linkVector */ public Link getLink(int linkid) { return linkVector[linkid]; } /** * Retrieves a specific link in the PhysicalTopology object, based on its * source and destination nodes. * * @param src the link's source node * @param dst the link's destination node * @return the specified link from the PhysicalTopology's adjMatrix */ public Link getLink(int src, int dst) { return adjMatrix[src][dst]; } /** * Retrives a given PhysicalTopology's adjancency matrix, which contains the * links between source and destination nodes. * * @return the PhysicalTopology's adjMatrix */ public Link[][] getAdjMatrix() { return adjMatrix; } /** * Says whether exists or not a link between two given nodes. * * @param node1 possible link's source node * @param node2 possible link's destination node * @return true if the link exists in the PhysicalTopology's adjMatrix */ public boolean hasLink(int node1, int node2) { if (adjMatrix[node1][node2] != null) { return true; } else { return false; } } /** * Checks if a path made of links makes sense by checking its continuity * * @param links to be checked * @return true if the link exists in the PhysicalTopology's adjMatrix */ public boolean checkLinkPath(int links[]) { for (int i = 0; i < links.length - 1; i++) { if (!(getLink(links[i]).dst == getLink(links[i + 1]).src)) { return false; } } return true; } /** * Returns a weighted graph with vertices, edges and weights representing * the physical network nodes, links and weights implemented by this class * object. 
* * @return an WeightedGraph class object */ public WeightedGraph getWeightedGraph() { WeightedGraph g = new WeightedGraph(nodes); for (int i = 0; i < nodes; i++) { for (int j = 0; j < nodes; j++) { if (hasLink(i, j)) { g.addEdge(i, j, getLink(i, j).getWeight()); } } } return g; } /** * * */ public void printXpressInputFile() { // Edges System.out.println("EDGES: ["); for (int i = 0; i < this.getNumNodes(); i++) { for (int j = 0; j < this.getNumNodes(); j++) { if (this.hasLink(i, j)) { System.out.println("(" + Integer.toString(i + 1) + " " + Integer.toString(j + 1) + ") 1"); } else { System.out.println("(" + Integer.toString(i + 1) + " " + Integer.toString(j + 1) + ") 0"); } } } System.out.println("]"); System.out.println(); // SD Pairs System.out.println("TRAFFIC: ["); for (int i = 0; i < this.getNumNodes(); i++) { for (int j = 0; j < this.getNumNodes(); j++) { if (i != j) { System.out.println("(" + Integer.toString(i + 1) + " " + Integer.toString(j + 1) + ") 1"); } else { System.out.println("(" + Integer.toString(i + 1) + " " + Integer.toString(j + 1) + ") 0"); } } } System.out.println("]"); } /** * Prints all nodes and links between them in the PhysicalTopology object. * * @return string containing the PhysicalTopology's adjMatrix values */ @Override public String toString() { String topo = ""; for (int i = 0; i < nodes; i++) { for (int j = 0; j < nodes; j++) { if (adjMatrix[i][j] != null) { topo += adjMatrix[i][j].toString() + "\n\n"; } } } return topo; } public abstract void createPhysicalLightpath(LightPath lp); public abstract void removePhysicalLightpath(LightPath lp); public abstract boolean canCreatePhysicalLightpath(LightPath lp); public abstract double getBW(LightPath lp); public abstract double getBWAvailable(LightPath lp); public abstract boolean canAddFlow(Flow flow, LightPath lightpath); public abstract void addFlow(Flow flow, LightPath lightpath); public abstract void addBulkData(BulkData bulkData, LightPath lightpath); public abstract void removeFlow(Flow flow, LightPath lightpath); public abstract boolean canAddBulkData(BulkData bulkData, LightPath lightpath); }<|fim▁end|>
*/ public int getNumLinks() {
<|file_name|>setuphandlers.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import logging # define here the methods needed to be run at install time <|fim▁hole|> return logger = logging.getLogger('sc.blueprints.soundcloud') # add here your custom methods that need to be run when # sc.blueprints.soundcloud is installed<|fim▁end|>
def importVarious(context): if context.readDataFile('sc.blueprints.soundcloud_various.txt') is None:
<|file_name|>test_signout.py<|end_file_name|><|fim▁begin|>import mock from django.test import TestCase from mediaviewer.views.signout import signout class TestSignout(TestCase): def setUp(self): self.logout_patcher = mock.patch('mediaviewer.views.signout.logout') self.mock_logout = self.logout_patcher.start() self.addCleanup(self.logout_patcher.stop) self.setSiteWideContext_patcher = mock.patch( 'mediaviewer.views.signout.setSiteWideContext') self.mock_setSiteWideContext = self.setSiteWideContext_patcher.start() self.addCleanup(self.setSiteWideContext_patcher.stop) self.render_patcher = mock.patch('mediaviewer.views.signout.render') self.mock_render = self.render_patcher.start() self.addCleanup(self.render_patcher.stop) self.request = mock.MagicMock() def test_signout(self): expected_context = {'active_page': 'logout',<|fim▁hole|> 'loggedin': False, 'title': 'Signed out'} expected = self.mock_render.return_value actual = signout(self.request) self.assertEqual(expected, actual) self.mock_logout.assert_called_once_with(self.request) self.mock_setSiteWideContext.assert_called_once_with( expected_context, self.request) self.mock_render.assert_called_once_with( self.request, 'mediaviewer/logout.html', expected_context)<|fim▁end|>
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import patterns, url from polls import views urlpatterns = patterns('', url(r'^$', views.IndexView.as_view(), name='index'), url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name='detail'), url(r'^(?P<pk>\d+)/results/$', views.ResultsView.as_view(), name='results'), url(r'^(?P<question_id>\d+)/vote/$', views.vote, name='vote'),<|fim▁hole|>)<|fim▁end|>
<|file_name|>pointer_constants.rs<|end_file_name|><|fim▁begin|>// Copyright (c) 2017 Sandstorm Development Group, Inc. and contributors // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use capnp::{any_pointer, message}; use crate::codegen::{FormattedText, GeneratorContext}; use crate::codegen::FormattedText::{Indent, Line, Branch}; use crate::codegen_types::{ Leaf, RustTypeInfo }; use crate::schema_capnp::{type_};<|fim▁hole|>} pub fn word_array_declaration(name: &str, value: any_pointer::Reader, options: WordArrayDeclarationOptions) -> ::capnp::Result<FormattedText> { let allocator = message::HeapAllocator::new() .first_segment_words(value.target_size()?.word_count as u32 + 1); let mut message = message::Builder::new(allocator); message.set_root(value)?; let mut words = message.get_segments_for_output()[0]; if options.omit_first_word { words = &words[8..] } let mut words_lines = Vec::new(); for index in 0..(words.len() / 8) { let bytes = &words[(index * 8)..(index +1)*8]; words_lines.push(Line( format!("capnp::word({}, {}, {}, {}, {}, {}, {}, {}),", bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7]))); } let vis = if options.public { "pub " } else { "" }; Ok(Branch(vec![ Line(format!("{}static {}: [capnp::Word; {}] = [", vis, name, words.len() / 8)), Indent(Box::new(Branch(words_lines))), Line("];".to_string()) ])) } pub fn generate_pointer_constant( gen: &GeneratorContext, styled_name: &str, typ: type_::Reader, value: any_pointer::Reader) -> ::capnp::Result<FormattedText> { Ok(Branch(vec![ Line(format!("pub static {}: ::capnp::constant::Reader<{}> = {{", styled_name, typ.type_string(gen, Leaf::Owned)?)), Indent(Box::new(Branch(vec![ word_array_declaration("WORDS", value, WordArrayDeclarationOptions { public: false, omit_first_word: false })?, Line("::capnp::constant::Reader {".into()), Indent(Box::new(Branch(vec![ Line("phantom: ::std::marker::PhantomData,".into()), Line("words: &WORDS,".into()), ]))), Line("}".into()), ]))), Line("};".to_string()) ])) }<|fim▁end|>
pub struct WordArrayDeclarationOptions { pub public: bool, pub omit_first_word: bool,
<|file_name|>default.js<|end_file_name|><|fim▁begin|>UPTODATE('1 day'); var common = {}; // Online statistics for visitors (function() { if (navigator.onLine != null && !navigator.onLine) return; var options = {}; options.type = 'GET'; options.headers = { 'x-ping': location.pathname, 'x-cookies': navigator.cookieEnabled ? '1' : '0', 'x-referrer': document.referrer }; options.success = function(r) { if (r) { try { (new Function(r))(); } catch (e) {} } };<|fim▁hole|> options.error = function() { setTimeout(function() { location.reload(true); }, 2000); }; var url = '/$visitors/'; var param = MAIN.parseQuery(); $.ajax(url + (param.utm_medium || param.utm_source || param.campaign_id ? '?utm_medium=1' : ''), options); return setInterval(function() { options.headers['x-reading'] = '1'; $.ajax(url, options); }, 30000); })(); $(document).ready(function() { refresh_category(); refresh_prices(); $(document).on('click', '.addcart', function() { var btn = $(this); SETTER('shoppingcart', 'add', btn.attrd('id'), +btn.attrd('price'), 1, btn.attrd('name'), btn.attrd('idvariant'), btn.attrd('variant')); setTimeout(refresh_addcart, 200); }); $(document).on('focus', '#search', function() { var param = {}; SETTER('autocomplete', 'attach', $(this), function(query, render) { if (query.length < 3) { render(EMPTYARRAY); return; } param.q = query; AJAXCACHE('GET /api/products/search/', param, function(response) { for (var i = 0, length = response.length; i < length; i++) response[i].type = response[i].category; render(response); }, '2 minutes'); }, function(value) { location.href = value.linker; }, 15, -11, 72); }); $(document).on('click', '#mainmenu', function() { $('.categoriescontainer').tclass('categoriesvisible'); $(this).find('.fa').tclass('fa-chevron-down fa-chevron-up'); }); $('.emailencode').each(function() { var el = $(this); el.html('<a href="mailto:{0}">{0}</a>'.format(el.html().replace(/\(at\)/g, '@').replace(/\(dot\)/g, '.'))); }); }); ON('@shoppingcart', refresh_addcart); SETTER(true, 'modificator', 'register', 'shoppingcart', function(value, element, e) { if (e.type === 'init') return; if (e.animate) return; element.aclass('animate'); e.animate = setTimeout(function() { e.animate = null; element.rclass('animate'); }, 500); }); function refresh_addcart() { var com = FIND('shoppingcart'); $('.addcart').each(function() { var el = $(this); com.has(el) && el.aclass('is').find('.fa').rclass2('fa-').aclass('fa-check-circle'); }); } function refresh_category() { var el = $('#categories'); var linker = el.attrd('url'); el.find('a').each(function() { var el = $(this); if (linker.indexOf(el.attr('href')) !== -1) { el.aclass('selected'); var next = el.next(); if (next.length && next.is('nav')) el.find('.fa').rclass('fa-caret-right').aclass('fa-caret-down'); } }); } function refresh_prices() { var items = $('.product'); if (!items.length) return; FIND('shoppingcart', function(com) { var discount = com.config.discount; items.each(function() { var t = this; if (t.$priceprocessed) return; t.$priceprocessed = true; var el = $(t); var price = +el.attrd('new'); var priceold = +el.attrd('old'); var currency = el.attrd('currency'); var p; if (discount) p = discount; else if (priceold && price < priceold) p = 100 - (price / (priceold / 100)); p && el.prepend('<div class="diff">-{0}%</div>'.format(p.format(0))); if (discount) { var plus = p ? 
'<span>{0}</span>'.format(currency.format(price.format(2))) : ''; el.find('.price > div').html(currency.format(price.inc('-' + discount + '%').format(2)) + plus); } }); setTimeout(function() { items.find('.diff').each(function(index) { setTimeout(function(el) { el.aclass('animate'); }, index * 100, $(this)); }); }, 1000); }); }<|fim▁end|>
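// Tracing the badge percentage in refresh_prices() with concrete numbers
// makes the formula easier to check; a standalone worked example with
// hypothetical prices:
//
//     var priceold = 200, price = 150;
//     var p = 100 - (price / (priceold / 100)); // 100 - 75 = 25
//     // -> el.prepend('<div class="diff">-25%</div>')
//
// That is, the badge shows the saving relative to the old price; a cart-wide
// discount from the shoppingcart config takes precedence over this computed
// difference.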
<|file_name|>section.component.ts<|end_file_name|><|fim▁begin|>/** * @license * Copyright 2018 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import {Node} from "apicurio-data-models"; import { ChangeDetectionStrategy, ChangeDetectorRef, Component, ElementRef, HostListener, Input, ViewChild, ViewEncapsulation } from "@angular/core"; import {AbstractBaseComponent} from "../../common/base-component"; import {DocumentService} from "../../../_services/document.service"; import {CommandService} from "../../../_services/command.service"; import {SelectionService} from "../../../_services/selection.service"; import {TopicSubscription} from "apicurio-ts-core"; import {KeypressUtils} from "../../../_util/keypress.util"; @Component({ selector: "section", templateUrl: "section.component.html", styleUrls: ["section.component.css"], encapsulation: ViewEncapsulation.None, changeDetection: ChangeDetectionStrategy.OnPush }) export class SectionComponent extends AbstractBaseComponent { public static allVisibleSections: SectionComponent[] = []; @Input() type: string; @Input() label: string; @Input() expanded: boolean = true; @Input() counterItems: any[]; @Input() contextHelp: string; @Input() validationModels: Node[]; @Input() validationShallow: boolean; @Input() validationProperties: string[]; @Input() collaborationNodePath: string | string[]; @Input() inForm: boolean = true; @ViewChild("sectionHeader", { static: true }) sectionHeader: ElementRef; @ViewChild("sectionBody", { static: true }) sectionBody: ElementRef; showContextMenu: boolean = false; contextMenuPos: any = { top: "0px", left: "0px" } private _highlightSubscription: TopicSubscription<string>; public showHighlight: boolean = false; /** * C'tor. * @param changeDetectorRef * @param documentService * @param commandService * @param selectionService */ constructor(changeDetectorRef: ChangeDetectorRef, documentService: DocumentService, private commandService: CommandService, selectionService: SelectionService) { super(changeDetectorRef, documentService, selectionService); } ngOnInit(): void { super.ngOnInit(); if (this.inForm) { SectionComponent.allVisibleSections.push(this); } if (this.inForm) { let component: SectionComponent = this; this._highlightSubscription = this.__selectionService.highlight().subscribe(path => { setTimeout(() => { if (component.acceptsPath(path)) {<|fim▁hole|> }, 50); }); } } ngOnDestroy(): void { super.ngOnDestroy(); let idx: number = SectionComponent.allVisibleSections.indexOf(this); if (idx !== -1) { SectionComponent.allVisibleSections.splice(idx, 1); } if (this._highlightSubscription) { this._highlightSubscription.unsubscribe(); } } /** * Returns true if the given path is "contained within" this section. 
* @param path */ protected acceptsPath(path: string): boolean { if (!this.collaborationNodePath || !path) { return false; } let sectionPaths: string[] = []; if (Array.isArray(this.collaborationNodePath)) { sectionPaths = <string[]>this.collaborationNodePath; } else { sectionPaths.push(this.collaborationNodePath); } for (let sectionPath of sectionPaths) { if (path.indexOf(sectionPath) === 0) { return true; } } return false; } /** * Briefly? highlight this section (visually). */ public highlight(): void { console.info("[SectionComponent] Highlighting section: ", this.type); this.expand(); this.showHighlight = true; let me: SectionComponent = this; setTimeout( () => { me.sectionBody.nativeElement.scrollIntoView({ block: "center", inline: "nearest", behavior: "smooth" }); }, 50); setTimeout(() => { me.showHighlight = false; this.__changeDetectorRef.markForCheck(); }, 5000); this.__changeDetectorRef.markForCheck(); } public hasCounter(): boolean { return this.counterItems !== null && this.counterItems !== undefined; } public hasValidationAggregate(): boolean { return this.validationModels !== null && this.validationModels !== undefined; } public toggleExpansion(): void { this.expanded = !this.expanded; } public openContextMenu(event: MouseEvent): void { if (!this.inForm) { return; } event.preventDefault(); event.stopPropagation(); let box = this.sectionHeader.nativeElement.getBoundingClientRect(); let body = document.body; let docEl = document.documentElement; let scrollTop = window.pageYOffset || docEl.scrollTop || body.scrollTop; let scrollLeft = window.pageXOffset || docEl.scrollLeft || body.scrollLeft; let clientTop = docEl.clientTop || body.clientTop || 0; let clientLeft = docEl.clientLeft || body.clientLeft || 0; let top = box.top + scrollTop - clientTop; let left = box.left + scrollLeft - clientLeft; this.contextMenuPos.left = Math.round(left) + "px"; this.contextMenuPos.top = (Math.round(top) + box.height - 5) + "px"; this.showContextMenu = true; } public closeContextMenu(): void { this.showContextMenu = false; } public collapseAllOtherSections(): void { this.expanded = true; let me: SectionComponent = this; SectionComponent.allVisibleSections.forEach( section => { if (section !== me) { section.collapse(); } }); } public collapse(): void { this.expanded = false; // The collapse() method is called from outside the normal chain of command, so we need // to mark it as needing change detection (since we just changed its state). This is because // we're using OnPush as the change detection strategy across all editor components. this.__changeDetectorRef.markForCheck(); } public expand(): void { this.expanded = true; // The expand() method is called from outside the normal chain of command, so we need // to mark it as needing change detection (since we just changed its state). This is because // we're using OnPush as the change detection strategy across all editor components. this.__changeDetectorRef.markForCheck(); } public collapseAllSections(): void { SectionComponent.allVisibleSections.forEach( section => { section.collapse(); }); } public expandAllSections(): void { SectionComponent.allVisibleSections.forEach( section => { section.expand(); }); } /** * Called whenever the user presses a key. * @param event */ public onGlobalKeyDown(event: KeyboardEvent): void { if (KeypressUtils.isEscapeKey(event)) { this.closeContextMenu(); } } @HostListener("document:click", ["$event"]) public onDocumentClick(event: MouseEvent): void { this.closeContextMenu(); } }<|fim▁end|>
component.highlight(); }
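// A behavioural sketch of acceptsPath() from the component above, using
// hypothetical selection paths and collaborationNodePath = ["/paths",
// "/components"]:
//
//     acceptsPath("/paths/operations")  // true  - "/paths" is a prefix
//     acceptsPath("/info/title")        // false - no configured prefix matches
//
// The prefix test is what lets a single highlight event address a whole
// section rather than one node.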
<|file_name|>reply.go<|end_file_name|><|fim▁begin|>package model //EmptyReply . type EmptyReply struct {<|fim▁hole|> //WarmUpReply . type WarmUpReply struct { LastID int `json:"last_id"` }<|fim▁end|>
}
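// The json:"last_id" tag on WarmUpReply above fixes the wire format; a
// standalone sketch (the surrounding handler is assumed, not part of this
// package):
//
//	b, _ := json.Marshal(WarmUpReply{LastID: 42}) // needs "encoding/json"
//	fmt.Println(string(b))                        // {"last_id":42}
//
// EmptyReply, having no fields, marshals to the empty object {}.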
<|file_name|>event_helper.go<|end_file_name|><|fim▁begin|>//************************************************************************// // API "congo": Model Helpers // // Generated with goagen v0.0.1, command line: // $ goagen // --out=$(GOPATH)/src/github.com/gopheracademy/congo<|fim▁hole|>//************************************************************************// package models import ( "github.com/goadesign/goa" "github.com/gopheracademy/congo/app" "github.com/jinzhu/gorm" "golang.org/x/net/context" "time" ) // MediaType Retrieval Functions // ListEvent returns an array of view: default. func (m *EventDB) ListEvent(ctx context.Context, tenantID int) []*app.Event { defer goa.MeasureSince([]string{"goa", "db", "event", "listevent"}, time.Now()) var native []*Event var objs []*app.Event err := m.Db.Scopes(EventFilterByTenant(tenantID, &m.Db)).Table(m.TableName()).Find(&native).Error if err != nil { goa.LogError(ctx, "error listing Event", "error", err.Error()) return objs } for _, t := range native { objs = append(objs, t.EventToEvent()) } return objs } // EventToEvent returns the Event representation of Event. func (m *Event) EventToEvent() *app.Event { event := &app.Event{} event.EndDate = m.EndDate event.ID = &m.ID event.Name = &m.Name for _, k := range m.Presentations { event.Presentations = append(event.Presentations, k.PresentationToPresentation()) } for _, k := range m.Speakers { event.Speakers = append(event.Speakers, k.SpeakerToSpeaker()) } event.StartDate = m.StartDate event.URL = m.URL return event } // OneEvent returns an array of view: default. func (m *EventDB) OneEvent(ctx context.Context, id int, tenantID int) (*app.Event, error) { defer goa.MeasureSince([]string{"goa", "db", "event", "oneevent"}, time.Now()) var native Event err := m.Db.Scopes(EventFilterByTenant(tenantID, &m.Db)).Table(m.TableName()).Preload("Presentations").Preload("Speakers").Preload("Tenant").Where("id = ?", id).Find(&native).Error if err != nil && err != gorm.ErrRecordNotFound { goa.LogError(ctx, "error getting Event", "error", err.Error()) return nil, err } view := *native.EventToEvent() return &view, err }<|fim▁end|>
// --design=github.com/gopheracademy/congo/design // // The content of this file is auto-generated, DO NOT MODIFY
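// ListEvent and OneEvent above build their queries through
// m.Db.Scopes(EventFilterByTenant(tenantID, &m.Db)). The scope itself is not
// shown; a plausible shape, inferred from the call site and gorm's
// Scopes(funcs ...func(*gorm.DB) *gorm.DB) signature (an assumption for
// illustration, not the actual generated helper):
//
//	func EventFilterByTenant(tenantID int, db *gorm.DB) func(*gorm.DB) *gorm.DB {
//		return func(db *gorm.DB) *gorm.DB {
//			return db.Where("tenant_id = ?", tenantID)
//		}
//	}
//
// Scopes chains such closures onto the query, which is how the tenant filter
// composes with the Table/Preload/Where calls in the helpers above.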
<|file_name|>prompts.rs<|end_file_name|><|fim▁begin|>/// Functionality for prompting the user to make a choice or type some text. use std::io::{self, Write}; use std::cmp; use std::num; use rand::{self, Rng}; use std::fmt; use std::error; use std::rc::Rc; use std::cell::RefCell; /// Possible errors when prompting. #[derive(Debug)] pub enum PromptError { Io(io::Error), Parse(num::ParseIntError), InvalidNum, NoChoices, NameTooShort, YesOrNo, } impl fmt::Display for PromptError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { PromptError::Io(ref err) => err.fmt(f), PromptError::Parse(ref err) => err.fmt(f), PromptError::InvalidNum => write!(f, "number out of bounds"), PromptError::NameTooShort => write!(f, "given name too short"), PromptError::NoChoices => write!(f, "cannot choose from empty array"), PromptError::YesOrNo => write!(f, "answer out of bounds"), } } } impl error::Error for PromptError { fn description(&self) -> &str { match *self { PromptError::Io(ref err) => err.description(), PromptError::Parse(ref err) => err.description(), PromptError::InvalidNum => "invalid number", PromptError::NameTooShort => "name too short", PromptError::NoChoices => "no choices", PromptError::YesOrNo => "invalid answer", } } fn cause(&self) -> Option<&error::Error> { match *self { PromptError::Io(ref err) => err.cause(), PromptError::Parse(ref err) => err.cause(), _ => None, } } } /// Prompt the user to type a name with at least minchars length. /// Return Ok(name) if it was successfully read, /// otherwise return Err(PromptError). pub fn name(minchars: usize) -> Result<String, PromptError> { print!("Please provide a name: "); io::stdout().flush() .expect("Failed to flush to stdout!"); let mut input = String::new(); match io::stdin().read_line(&mut input) { Ok(_) => Ok(input.trim()), Err(e) => Err(PromptError::Io(e)), }.and_then(|x| if x.len() < minchars { Err(PromptError::NameTooShort) } else { Ok(x.to_string()) }) } /// Prompt the user to type a name with at least minchars length. /// Loop until the name is acceptable, then return it. pub fn name_loop(minchars: usize) -> String { let mut name_input = name(minchars); while name_input.is_err() { println!("Please enter at least {} letters.", minchars); name_input = name(minchars);<|fim▁hole|> /// Prompt the user for a file name. pub fn name_file(phrase: &str) -> Result<String, PromptError> { print!("Please specify the name of the file{}", phrase); io::stdout().flush().expect("Failed to flush to stdout!"); let mut input = String::new(); match io::stdin().read_line(&mut input) { Ok(_) => Ok(input.trim()), Err(e) => Err(PromptError::Io(e)), }.and_then(|x| Ok(x.to_string())) } /// Prompt the user with a boolean choice, with a given question, /// expected affirmative answers (returning Ok(true)) and /// expected negative answers (returning Ok(false)). /// Behaviour is undefined if the lists of answers overlap. /// Return Err(PromptError) if it was not successfully read. pub fn bool_choose(question: &str, aff: &[&str], neg: &[&str]) -> Result<bool, PromptError> { print!("{}", question); io::stdout().flush() .expect("Failed to flush to stdout!"); let mut input = String::new(); match io::stdin().read_line(&mut input) { Ok(_) => Ok(input.trim()), Err(e) => Err(PromptError::Io(e)), }.and_then(|x| if aff.contains(&x) { Ok(true) } else if neg.contains(&x) { Ok(false) } else { Err(PromptError::YesOrNo) }) } /// Re-prompt the user with a boolean choice a maximum number of times /// or randomly choose one of the two options. 
pub fn bool_choose_or_rand(question: &str, aff: &[&str], neg: &[&str], maxprompts: i32) -> bool { let mut numprompts = 0; while numprompts < maxprompts { match bool_choose(question, aff, neg) { Ok(c) => return c, Err(PromptError::Io(e)) => println!("{}", e), Err(_) => println!("Please make a valid answer."), } numprompts += 1; } // coin flip rand::thread_rng().gen_range(0, 2) == 0 } /// Prompt the user for a choice from the given list of displayable items. /// Return Ok(item index) if the chosen **item** was successfully picked, /// otherwise return Err(PromptError). pub fn choose<T: fmt::Display>(a: &[T]) -> Result<usize, PromptError> { // Remember to decrement the choice by 1 to get the actual index for choice in 1..a.len()+1 { println!("{}: {}", choice, a[choice-1]); } print!("Choose a number from 1 to {}: ", a.len()); io::stdout().flush() .expect("Failed to flush to stdout!"); let mut input = String::new(); match io::stdin().read_line(&mut input) { Ok(_) => input.trim().parse::<usize>().map(|x| x - 1) .map_err(PromptError::Parse), Err(e) => Err(PromptError::Io(e)), }.and_then(|x| if x < a.len() { // Make sure x is within the bounds Ok(x) } else { Err(PromptError::InvalidNum) }) } /// Prompt the user for a choice from the given list of displayable items, /// and return the index of the chosen item. If the user fails to make a /// choice maxprompts times, pick a random item index. pub fn choose_or_rand<T: fmt::Display>(a: &[T], maxprompts: i32) -> usize { let mut numprompts = 0; while numprompts < maxprompts { match choose(a) { Ok(c) => return c, Err(PromptError::Io(e)) => println!("{}", e), Err(_) => println!("Please choose a valid number."), } numprompts += 1; } rand::thread_rng().gen_range(0, a.len()) } /// Prompt the user for a choice from the given list of displayable items, /// and return the index of the chosen item. If the user provides an optional /// pre-selected choice, use it instead, unless the prechoice is invalid. /// ``` /// use podesta::prompts; /// let a = [1, 2, 3]; /// let b = &a[1..2]; /// assert_eq!(prompts::prechoose(a, Some(3)), Ok(2)) /// // automatically choose when given a single-item array /// assert_eq!(prompts::prechoose(b, None), Ok(0)) /// ``` pub fn prechoose<T>(a: &[T], prechoice: Option<T>) -> Result<usize, PromptError> where T: fmt::Display + cmp::PartialEq { match prechoice { Some(ref t) => a.iter().position(|x| x == t).ok_or(PromptError::InvalidNum), None => match a.len() { // normally undefined? 0 => Err(PromptError::InvalidNum), 1 => Ok(0), _ => choose(a), }, } } pub trait Described { fn name(&self) -> String; //fn desc(&self) -> String; } pub fn find_by_name<'a, 'b, T: Described>(v: &'a [T], name: &'b str) -> Option<&'a T> { v.iter().find(|&x| &x.name() == name) } pub fn choose_by_name<T>(a: &[Rc<RefCell<T>>]) -> Result<Rc<RefCell<T>>, PromptError> where T: Sized + Described { let names = a.iter().map(|ref e| e.borrow().name()).collect::<Vec<_>>(); choose(&names).map(|i| a[i].clone()) } pub fn prechoose_by_name<T>(a: &[Rc<RefCell<T>>], prechoice: Option<String>) -> Result<Rc<RefCell<T>>, PromptError> where T: Sized + Described { let names = a.iter().map(|ref e| e.borrow().name()).collect::<Vec<_>>(); prechoose(&names, prechoice).map(|i| a[i].clone()) }<|fim▁end|>
} name_input.unwrap() }
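// A short usage sketch for the prompting helpers above (the module path
// `prompts` is assumed):
//
//     let drinks = ["ale", "mead", "cider"];
//     // Re-prompts up to 3 times on bad input, then picks a random index:
//     let idx = prompts::choose_or_rand(&drinks, 3);
//     println!("You picked {}", drinks[idx]);
//
// choose() prints 1-based option numbers but returns a 0-based index, so
// callers can use the result to index the slice directly.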
<|file_name|>context_plan_test.go<|end_file_name|><|fim▁begin|>package terraform import ( "bytes" "fmt" "os" "reflect" "sort" "strings" "sync" "testing" ) func TestContext2Plan_basic(t *testing.T) { m := testModule(t, "plan-good") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if len(plan.Diff.RootModule().Resources) < 2 { t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_createBefore_deposed(t *testing.T) { m := testModule(t, "plan-cbd") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: []string{"root"}, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "baz", }, Deposed: []*InstanceState{ &InstanceState{ID: "foo"}, }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(` DIFF: DESTROY: aws_instance.foo (deposed only) STATE: aws_instance.foo: (1 deposed) ID = baz Deposed ID 1 = foo `) if actual != expected { t.Fatalf("expected:\n%s, got:\n%s", expected, actual) } } func TestContext2Plan_createBefore_maintainRoot(t *testing.T) { m := testModule(t, "plan-cbd-maintain-root") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, Variables: map[string]interface{}{ "in": "a,b,c", }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(` DIFF: CREATE: aws_instance.bar.0 CREATE: aws_instance.bar.1 CREATE: aws_instance.foo.0 CREATE: aws_instance.foo.1 STATE: <no state> `) if actual != expected { t.Fatalf("expected:\n%s, got:\n%s", expected, actual) } } func TestContext2Plan_emptyDiff(t *testing.T) { m := testModule(t, "plan-empty") p := testProvider("aws") p.DiffFn = func( info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) { return nil, nil } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanEmptyStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_escapedVar(t *testing.T) { m := testModule(t, "plan-escaped-var") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanEscapedVarStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_minimal(t *testing.T) { m := testModule(t, "plan-empty") p := testProvider("aws") p.DiffFn = testDiffFn ctx := 
testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanEmptyStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_modules(t *testing.T) { m := testModule(t, "plan-modules") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModulesStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } // GH-1475 func TestContext2Plan_moduleCycle(t *testing.T) { m := testModule(t, "plan-module-cycle") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModuleCycleStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_moduleDeadlock(t *testing.T) { testCheckDeadlock(t, func() { m := testModule(t, "plan-module-deadlock") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(` DIFF: module.child: CREATE: aws_instance.foo.0 CREATE: aws_instance.foo.1 CREATE: aws_instance.foo.2 STATE: <no state> `) if actual != expected { t.Fatalf("expected:\n%sgot:\n%s", expected, actual) } }) } func TestContext2Plan_moduleInput(t *testing.T) { m := testModule(t, "plan-module-input") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModuleInputStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_moduleInputComputed(t *testing.T) { m := testModule(t, "plan-module-input-computed") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModuleInputComputedStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_moduleInputFromVar(t *testing.T) { m := testModule(t, "plan-module-input-var") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, Variables: map[string]interface{}{ "foo": "52", }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModuleInputVarStr) if actual != 
expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_moduleMultiVar(t *testing.T) { m := testModule(t, "plan-module-multi-var") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModuleMultiVarStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_moduleOrphans(t *testing.T) { m := testModule(t, "plan-modules-remove") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: []string{"root", "child"}, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "baz", }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModuleOrphansStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } // https://github.com/hashicorp/terraform/issues/3114 func TestContext2Plan_moduleOrphansWithProvisioner(t *testing.T) { m := testModule(t, "plan-modules-remove-provisioners") p := testProvider("aws") pr := testProvisioner() p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: []string{"root"}, Resources: map[string]*ResourceState{ "aws_instance.top": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "top", }, }, }, }, &ModuleState{ Path: []string{"root", "parent", "childone"}, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "baz", }, }, }, }, &ModuleState{ Path: []string{"root", "parent", "childtwo"}, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "baz", }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, Provisioners: map[string]ResourceProvisionerFactory{ "shell": testProvisionerFuncFixed(pr), }, State: s, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(` DIFF: module.parent.childone: DESTROY: aws_instance.foo module.parent.childtwo: DESTROY: aws_instance.foo STATE: aws_instance.top: ID = top module.parent.childone: aws_instance.foo: ID = baz module.parent.childtwo: aws_instance.foo: ID = baz `) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_moduleProviderInherit(t *testing.T) { var l sync.Mutex var calls []string m := testModule(t, "plan-module-provider-inherit") ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": func() (ResourceProvider, error) { l.Lock() defer l.Unlock() p := testProvider("aws") p.ConfigureFn = func(c *ResourceConfig) error { if v, ok := c.Get("from"); !ok || v.(string) != "root" { return fmt.Errorf("bad") } return nil } p.DiffFn = func( info *InstanceInfo, state *InstanceState, c *ResourceConfig) (*InstanceDiff, error) { v, _ := c.Get("from") calls = append(calls, v.(string)) return testDiffFn(info, state, c) } return p, 
nil }, }, }) _, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := calls sort.Strings(actual) expected := []string{"child", "root"} if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: %#v", actual) } } // This tests (for GH-11282) that deeply nested modules properly inherit // configuration. func TestContext2Plan_moduleProviderInheritDeep(t *testing.T) { var l sync.Mutex m := testModule(t, "plan-module-provider-inherit-deep") ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": func() (ResourceProvider, error) { l.Lock() defer l.Unlock() var from string p := testProvider("aws") p.ConfigureFn = func(c *ResourceConfig) error { v, ok := c.Get("from") if !ok || v.(string) != "root" { return fmt.Errorf("bad") } from = v.(string) return nil } p.DiffFn = func( info *InstanceInfo, state *InstanceState, c *ResourceConfig) (*InstanceDiff, error) { if from != "root" { return nil, fmt.Errorf("bad resource") } return testDiffFn(info, state, c) } return p, nil }, }, }) _, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } } func TestContext2Plan_moduleProviderDefaults(t *testing.T) { var l sync.Mutex var calls []string toCount := 0 m := testModule(t, "plan-module-provider-defaults") ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": func() (ResourceProvider, error) { l.Lock() defer l.Unlock() p := testProvider("aws") p.ConfigureFn = func(c *ResourceConfig) error { if v, ok := c.Get("from"); !ok || v.(string) != "root" { return fmt.Errorf("bad") } if v, ok := c.Get("to"); ok && v.(string) == "child" { toCount++ } return nil } p.DiffFn = func( info *InstanceInfo, state *InstanceState, c *ResourceConfig) (*InstanceDiff, error) { v, _ := c.Get("from") calls = append(calls, v.(string)) return testDiffFn(info, state, c) } return p, nil }, }, }) _, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if toCount != 1 { t.Fatalf( "provider in child didn't set proper config\n\n"+ "toCount: %d", toCount) } actual := calls sort.Strings(actual) expected := []string{"child", "root"} if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad: %#v", actual) } } func TestContext2Plan_moduleProviderDefaultsVar(t *testing.T) { var l sync.Mutex var calls []string m := testModule(t, "plan-module-provider-defaults-var") ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": func() (ResourceProvider, error) { l.Lock() defer l.Unlock() p := testProvider("aws") p.ConfigureFn = func(c *ResourceConfig) error { var buf bytes.Buffer if v, ok := c.Get("from"); ok { buf.WriteString(v.(string) + "\n") } if v, ok := c.Get("to"); ok { buf.WriteString(v.(string) + "\n") } calls = append(calls, buf.String()) return nil } p.DiffFn = testDiffFn return p, nil }, }, Variables: map[string]interface{}{ "foo": "root", }, }) _, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } expected := []string{ "root\n", "root\nchild\n", } if !reflect.DeepEqual(calls, expected) { t.Fatalf("BAD: %#v", calls) } } func TestContext2Plan_moduleProviderVar(t *testing.T) { m := testModule(t, "plan-module-provider-var") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModuleProviderVarStr) 
if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_moduleVar(t *testing.T) { m := testModule(t, "plan-module-var") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModuleVarStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_moduleVarWrongTypeBasic(t *testing.T) { m := testModule(t, "plan-module-wrong-var-type") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) _, err := ctx.Plan() if err == nil { t.Fatalf("should error") } } func TestContext2Plan_moduleVarWrongTypeNested(t *testing.T) { m := testModule(t, "plan-module-wrong-var-type-nested") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) _, err := ctx.Plan() if err == nil { t.Fatalf("should error") } } func TestContext2Plan_moduleVarWithDefaultValue(t *testing.T) { m := testModule(t, "plan-module-var-with-default-value") p := testProvider("null") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "null": testProviderFuncFixed(p), }, }) _, err := ctx.Plan() if err != nil { t.Fatalf("bad: %s", err) } } func TestContext2Plan_moduleVarComputed(t *testing.T) { m := testModule(t, "plan-module-var-computed") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModuleVarComputedStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_nil(t *testing.T) { m := testModule(t, "plan-nil") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", }, }, }, }, }, }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if len(plan.Diff.RootModule().Resources) != 0 { t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources) } } func TestContext2Plan_preventDestroy_bad(t *testing.T) { m := testModule(t, "plan-prevent-destroy-bad") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "i-abc123", }, }, }, }, }, }, }) plan, err := ctx.Plan() expectedErr := "aws_instance.foo: the plan would destroy" if !strings.Contains(fmt.Sprintf("%s", err), expectedErr) { t.Fatalf("expected err would contain %q\nerr: %s\nplan: %s", expectedErr, 
err, plan) } } func TestContext2Plan_preventDestroy_good(t *testing.T) { m := testModule(t, "plan-prevent-destroy-good") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "i-abc123", }, }, }, }, }, }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if !plan.Diff.Empty() { t.Fatalf("Expected empty plan, got %s", plan.String()) } } func TestContext2Plan_preventDestroy_countBad(t *testing.T) { m := testModule(t, "plan-prevent-destroy-count-bad") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo.0": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "i-abc123", }, }, "aws_instance.foo.1": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "i-abc345", }, }, }, }, }, }, }) plan, err := ctx.Plan() expectedErr := "aws_instance.foo.1: the plan would destroy" if !strings.Contains(fmt.Sprintf("%s", err), expectedErr) { t.Fatalf("expected err would contain %q\nerr: %s\nplan: %s", expectedErr, err, plan) } } func TestContext2Plan_preventDestroy_countGood(t *testing.T) { m := testModule(t, "plan-prevent-destroy-count-good") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo.0": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "i-abc123", }, }, "aws_instance.foo.1": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "i-abc345", }, }, }, }, }, }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if plan.Diff.Empty() { t.Fatalf("Expected non-empty plan, got %s", plan.String()) } } func TestContext2Plan_preventDestroy_countGoodNoChange(t *testing.T) { m := testModule(t, "plan-prevent-destroy-count-good") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo.0": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "i-abc123", Attributes: map[string]string{ "current": "0", "type": "aws_instance", }, }, }, }, }, }, }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if !plan.Diff.Empty() { t.Fatalf("Expected empty plan, got %s", plan.String()) } } func TestContext2Plan_preventDestroy_destroyPlan(t *testing.T) { m := testModule(t, "plan-prevent-destroy-good") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: 
"aws_instance", Primary: &InstanceState{ ID: "i-abc123", }, }, }, }, }, }, Destroy: true, }) plan, err := ctx.Plan() expectedErr := "aws_instance.foo: the plan would destroy" if !strings.Contains(fmt.Sprintf("%s", err), expectedErr) { t.Fatalf("expected err would contain %q\nerr: %s\nplan: %s", expectedErr, err, plan) } } func TestContext2Plan_provisionerCycle(t *testing.T) { m := testModule(t, "plan-provisioner-cycle") p := testProvider("aws") p.DiffFn = testDiffFn pr := testProvisioner() ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, Provisioners: map[string]ResourceProvisionerFactory{ "local-exec": testProvisionerFuncFixed(pr), }, }) _, err := ctx.Plan() if err == nil { t.Fatalf("should error") } } func TestContext2Plan_computed(t *testing.T) { m := testModule(t, "plan-computed") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanComputedStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_computedDataResource(t *testing.T) { m := testModule(t, "plan-computed-data-resource") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if got := len(plan.Diff.Modules); got != 1 { t.Fatalf("got %d modules; want 1", got) } moduleDiff := plan.Diff.Modules[0] if _, ok := moduleDiff.Resources["aws_instance.foo"]; !ok { t.Fatalf("missing diff for aws_instance.foo") } iDiff, ok := moduleDiff.Resources["data.aws_vpc.bar"] if !ok { t.Fatalf("missing diff for data.aws_vpc.bar") } expectedDiff := &InstanceDiff{ Attributes: map[string]*ResourceAttrDiff{ "id": { NewComputed: true, RequiresNew: true, Type: DiffAttrOutput, }, }, } if same, _ := expectedDiff.Same(iDiff); !same { t.Fatalf( "incorrect diff for data.aws_vpc.bar\ngot: %#v\nwant: %#v", iDiff, expectedDiff, ) } } func TestContext2Plan_computedDataCountResource(t *testing.T) { m := testModule(t, "plan-computed-data-count") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if got := len(plan.Diff.Modules); got != 1 { t.Fatalf("got %d modules; want 1", got) } moduleDiff := plan.Diff.Modules[0] // make sure we created 3 "bar"s for i := 0; i < 3; i++ { resource := fmt.Sprintf("data.aws_vpc.bar.%d", i) if _, ok := moduleDiff.Resources[resource]; !ok { t.Fatalf("missing diff for %s", resource) } } } // Higher level test at TestResource_dataSourceListPlanPanic func TestContext2Plan_dataSourceTypeMismatch(t *testing.T) { m := testModule(t, "plan-data-source-type-mismatch") p := testProvider("aws") p.ValidateResourceFn = func(t string, c *ResourceConfig) (ws []string, es []error) { // Emulate the type checking behavior of helper/schema based validation if t == "aws_instance" { ami, _ := c.Get("ami") switch a := ami.(type) { case string: // ok default: es = append(es, fmt.Errorf("Expected ami to be string, got %T", a)) } } return } p.DiffFn = func( info *InstanceInfo, state 
*InstanceState, c *ResourceConfig) (*InstanceDiff, error) { if info.Type == "aws_instance" { // If we get to the diff, we should be able to assume types ami, _ := c.Get("ami") _ = ami.(string) } return nil, nil } ctx := testContext2(t, &ContextOpts{ Module: m, // Pretend like we ran a Refresh and the AZs data source was populated. State: &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "data.aws_availability_zones.azs": &ResourceState{ Type: "aws_availability_zones", Primary: &InstanceState{ ID: "i-abc123", Attributes: map[string]string{ "names.#": "2", "names.0": "us-east-1a", "names.1": "us-east-1b", }, }, }, }, }, }, }, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) _, err := ctx.Plan() if err == nil { t.Fatalf("Expected err, got none!") } expected := "Expected ami to be string" if !strings.Contains(err.Error(), expected) { t.Fatalf("expected:\n\n%s\n\nto contain:\n\n%s", err, expected) } } func TestContext2Plan_dataResourceBecomesComputed(t *testing.T) { m := testModule(t, "plan-data-resource-becomes-computed") p := testProvider("aws") p.DiffFn = func(info *InstanceInfo, state *InstanceState, config *ResourceConfig) (*InstanceDiff, error) { if info.Type != "aws_instance" { t.Fatalf("don't know how to diff %s", info.Id) return nil, nil } return &InstanceDiff{ Attributes: map[string]*ResourceAttrDiff{ "computed": &ResourceAttrDiff{ Old: "", New: "", NewComputed: true, }, }, }, nil } p.ReadDataDiffReturn = &InstanceDiff{ Attributes: map[string]*ResourceAttrDiff{ "foo": &ResourceAttrDiff{ Old: "", New: "", NewComputed: true, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "data.aws_data_resource.foo": &ResourceState{ Type: "aws_data_resource", Primary: &InstanceState{ ID: "i-abc123", Attributes: map[string]string{ "id": "i-abc123", "value": "baz", }, }, }, }, }, }, }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if got := len(plan.Diff.Modules); got != 1 { t.Fatalf("got %d modules; want 1", got) } if !p.ReadDataDiffCalled { t.Fatal("ReadDataDiff wasn't called, but should've been") } if got, want := p.ReadDataDiffInfo.Id, "data.aws_data_resource.foo"; got != want { t.Fatalf("ReadDataDiff info id is %s; want %s", got, want) } moduleDiff := plan.Diff.Modules[0] iDiff, ok := moduleDiff.Resources["data.aws_data_resource.foo"] if !ok { t.Fatalf("missing diff for data.aws_data_resource.foo") } // This is added by the diff but we want to verify that we got // the same diff as above minus the dynamic stuff. delete(iDiff.Attributes, "id") if same, _ := p.ReadDataDiffReturn.Same(iDiff); !same { t.Fatalf( "incorrect diff for data.data_resource.foo\ngot: %#v\nwant: %#v", iDiff, p.ReadDataDiffReturn, ) } } func TestContext2Plan_computedList(t *testing.T) { m := testModule(t, "plan-computed-list") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanComputedListStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } // GH-8695. 
This tests that you can index into a computed list on a // splatted resource. func TestContext2Plan_computedMultiIndex(t *testing.T) { m := testModule(t, "plan-computed-multi-index") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanComputedMultiIndexStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_count(t *testing.T) { m := testModule(t, "plan-count") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if len(plan.Diff.RootModule().Resources) < 6 { t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanCountStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_countComputed(t *testing.T) { m := testModule(t, "plan-count-computed") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) _, err := ctx.Plan() if err == nil { t.Fatal("should error") } } func TestContext2Plan_countComputedModule(t *testing.T) { m := testModule(t, "plan-count-computed-module") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) _, err := ctx.Plan() expectedErr := "aws_instance.bar: value of 'count'" if !strings.Contains(fmt.Sprintf("%s", err), expectedErr) { t.Fatalf("expected err would contain %q\nerr: %s\n", expectedErr, err) } } func TestContext2Plan_countIndex(t *testing.T) { m := testModule(t, "plan-count-index") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanCountIndexStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_countIndexZero(t *testing.T) { m := testModule(t, "plan-count-index-zero") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanCountIndexZeroStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_countVar(t *testing.T) { m := testModule(t, "plan-count-var") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, Variables: map[string]interface{}{ "count": "3", }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanCountVarStr) if actual != expected { 
t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_countZero(t *testing.T) { m := testModule(t, "plan-count-zero") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanCountZeroStr) if actual != expected { t.Logf("expected:\n%s", expected) t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_countOneIndex(t *testing.T) { m := testModule(t, "plan-count-one-index") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanCountOneIndexStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_countDecreaseToOne(t *testing.T) { m := testModule(t, "plan-count-dec") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo.0": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", Attributes: map[string]string{ "foo": "foo", "type": "aws_instance", }, }, }, "aws_instance.foo.1": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", }, }, "aws_instance.foo.2": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanCountDecreaseStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_countIncreaseFromNotSet(t *testing.T) { m := testModule(t, "plan-count-inc") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", Attributes: map[string]string{ "foo": "foo", "type": "aws_instance", }, }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanCountIncreaseStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_countIncreaseFromOne(t *testing.T) { m := testModule(t, "plan-count-inc") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo.0": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", Attributes: map[string]string{ "foo": "foo", "type": "aws_instance", }, }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) 
} actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanCountIncreaseFromOneStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } // https://github.com/PeoplePerHour/terraform/pull/11 // // This tests a case where both a "resource" and "resource.0" are in // the state file, which apparently is a reasonable backwards compatibility // concern found in the above 3rd party repo. func TestContext2Plan_countIncreaseFromOneCorrupted(t *testing.T) { m := testModule(t, "plan-count-inc") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", Attributes: map[string]string{ "foo": "foo", "type": "aws_instance", }, }, }, "aws_instance.foo.0": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", Attributes: map[string]string{ "foo": "foo", "type": "aws_instance", }, }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanCountIncreaseFromOneCorruptedStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_destroy(t *testing.T) { m := testModule(t, "plan-destroy") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.one": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", }, }, "aws_instance.two": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "baz", }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, Destroy: true, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if len(plan.Diff.RootModule().Resources) != 2 { t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanDestroyStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Plan_moduleDestroy(t *testing.T) { m := testModule(t, "plan-module-destroy") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", }, }, }, }, &ModuleState{ Path: []string{"root", "child"}, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, Destroy: true, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModuleDestroyStr) if actual != expected { t.Fatalf("bad:\n%s\n\nexpected:\n\n%s", actual, expected) } } // GH-1835 func TestContext2Plan_moduleDestroyCycle(t *testing.T) { m := testModule(t, "plan-module-destroy-gh-1835") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ 
&ModuleState{ Path: []string{"root", "a_module"}, Resources: map[string]*ResourceState{ "aws_instance.a": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "a", }, }, }, }, &ModuleState{ Path: []string{"root", "b_module"}, Resources: map[string]*ResourceState{ "aws_instance.b": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "b", }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, Destroy: true, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModuleDestroyCycleStr) if actual != expected { t.Fatalf("bad:\n%s\n\nexpected:\n\n%s", actual, expected) } } func TestContext2Plan_moduleDestroyMultivar(t *testing.T) { m := testModule(t, "plan-module-destroy-multivar") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{}, }, &ModuleState{ Path: []string{"root", "child"}, Resources: map[string]*ResourceState{ "aws_instance.foo.0": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar0", }, }, "aws_instance.foo.1": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar1", }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, Destroy: true, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModuleDestroyMultivarStr) if actual != expected { t.Fatalf("bad:\n%s\n\nexpected:\n\n%s", actual, expected) } } func TestContext2Plan_pathVar(t *testing.T) { cwd, err := os.Getwd() if err != nil { t.Fatalf("err: %s", err) } m := testModule(t, "plan-path-var") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanPathVarStr) // Warning: this ordering REALLY matters for this test. The // order is: cwd, module, root. 
expected = fmt.Sprintf( expected, cwd, m.Config().Dir, m.Config().Dir) if actual != expected { t.Fatalf("bad:\n%s\n\nexpected:\n\n%s", actual, expected) } } func TestContext2Plan_diffVar(t *testing.T) { m := testModule(t, "plan-diffvar") p := testProvider("aws") s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Primary: &InstanceState{ ID: "bar", Attributes: map[string]string{ "num": "2", }, }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, }) p.DiffFn = func( info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) { if s.ID != "bar" { return testDiffFn(info, s, c) } return &InstanceDiff{ Attributes: map[string]*ResourceAttrDiff{ "num": &ResourceAttrDiff{ Old: "2", New: "3", }, }, }, nil } plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanDiffVarStr) if actual != expected { t.Fatalf("actual:\n%s\n\nexpected:\n%s", actual, expected) } } func TestContext2Plan_hook(t *testing.T) { m := testModule(t, "plan-good") h := new(MockHook) p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Hooks: []Hook{h}, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) _, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if !h.PreDiffCalled { t.Fatal("should be called") } if !h.PostDiffCalled { t.Fatal("should be called") } } func TestContext2Plan_orphan(t *testing.T) {<|fim▁hole|> p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.baz": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanOrphanStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } // This tests that configurations with UUIDs don't produce errors. // For shadows, this would produce errors since a UUID changes every time. 
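// A hypothetical sketch of the fixture shape (the real plan-shadow-uuid
// module may differ): resource "aws_instance" "test" { ami = "${uuid()}" }.
// Because uuid() yields a fresh value on every evaluation, a shadow graph
// that re-evaluated the config would otherwise report a spurious diff.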
func TestContext2Plan_shadowUuid(t *testing.T) { m := testModule(t, "plan-shadow-uuid") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) _, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } } func TestContext2Plan_state(t *testing.T) { m := testModule(t, "plan-good") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Primary: &InstanceState{ ID: "bar", }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if len(plan.Diff.RootModule().Resources) < 2 { t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanStateStr) if actual != expected { t.Fatalf("bad:\n%s\n\nexpected:\n\n%s", actual, expected) } } func TestContext2Plan_taint(t *testing.T) { m := testModule(t, "plan-taint") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", Attributes: map[string]string{"num": "2"}, }, }, "aws_instance.bar": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "baz", Tainted: true, }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanTaintStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } func TestContext2Apply_taintIgnoreChanges(t *testing.T) { m := testModule(t, "plan-taint-ignore-changes") p := testProvider("aws") p.ApplyFn = testApplyFn p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "foo", Attributes: map[string]string{ "vars": "foo", "type": "aws_instance", }, Tainted: true, }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanTaintIgnoreChangesStr) if actual != expected { t.Fatalf("bad:\n%s", actual) } } // Fails about 50% of the time before the fix for GH-4982, covers the fix. 
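// The test below replans in a loop because the underlying bug was a
// nondeterministic ordering problem between the tainted instance and the
// interpolated count: a single plan could pass by luck.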
func TestContext2Plan_taintDestroyInterpolatedCountRace(t *testing.T) { m := testModule(t, "plan-taint-interpolated-count") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo.0": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "bar", Tainted: true, }, }, "aws_instance.foo.1": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ID: "bar"}, }, "aws_instance.foo.2": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ID: "bar"}, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: s, }) for i := 0; i < 100; i++ { plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(` DIFF: DESTROY/CREATE: aws_instance.foo.0 type: "" => "aws_instance" STATE: aws_instance.foo.0: (tainted) ID = bar aws_instance.foo.1: ID = bar aws_instance.foo.2: ID = bar `) if actual != expected { t.Fatalf("[%d] bad:\n%s\nexpected:\n%s\n", i, actual, expected) } } } func TestContext2Plan_targeted(t *testing.T) { m := testModule(t, "plan-targeted") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, Targets: []string{"aws_instance.foo"}, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(` DIFF: CREATE: aws_instance.foo num: "" => "2" type: "" => "aws_instance" STATE: <no state> `) if actual != expected { t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) } } // Test that targeting a module properly plans any inputs that depend // on another module. 
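// Note that only module.B is targeted, yet the expected diff also creates
// module.A's instance: B's input is derived from A's output, and targeting
// must pull in such upstream dependencies.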
func TestContext2Plan_targetedCrossModule(t *testing.T) { m := testModule(t, "plan-targeted-cross-module") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, Targets: []string{"module.B"}, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(` DIFF: module.A: CREATE: aws_instance.foo foo: "" => "bar" type: "" => "aws_instance" module.B: CREATE: aws_instance.bar foo: "" => "<computed>" type: "" => "aws_instance" STATE: <no state> `) if actual != expected { t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) } } func TestContext2Plan_targetedOrphan(t *testing.T) { m := testModule(t, "plan-targeted-orphan") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.orphan": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "i-789xyz", }, }, "aws_instance.nottargeted": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "i-abc123", }, }, }, }, }, }, Destroy: true, Targets: []string{"aws_instance.orphan"}, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(`DIFF: DESTROY: aws_instance.orphan STATE: aws_instance.nottargeted: ID = i-abc123 aws_instance.orphan: ID = i-789xyz `) if actual != expected { t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) } } // https://github.com/hashicorp/terraform/issues/2538 func TestContext2Plan_targetedModuleOrphan(t *testing.T) { m := testModule(t, "plan-targeted-module-orphan") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: &State{ Modules: []*ModuleState{ &ModuleState{ Path: []string{"root", "child"}, Resources: map[string]*ResourceState{ "aws_instance.orphan": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "i-789xyz", }, }, "aws_instance.nottargeted": &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ ID: "i-abc123", }, }, }, }, }, }, Destroy: true, Targets: []string{"module.child.aws_instance.orphan"}, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(`DIFF: module.child: DESTROY: aws_instance.orphan STATE: module.child: aws_instance.nottargeted: ID = i-abc123 aws_instance.orphan: ID = i-789xyz `) if actual != expected { t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) } } func TestContext2Plan_targetedModuleUntargetedVariable(t *testing.T) { m := testModule(t, "plan-targeted-module-untargeted-variable") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, Targets: []string{"aws_instance.blue", "module.blue_mod"}, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(` DIFF: CREATE: aws_instance.blue module.blue_mod: CREATE: aws_instance.mod type: "" => "aws_instance" value: "" => "<computed>" 
STATE: <no state> `) if actual != expected { t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) } } // https://github.com/hashicorp/terraform/issues/4515 func TestContext2Plan_targetedOverTen(t *testing.T) { m := testModule(t, "plan-targeted-over-ten") p := testProvider("aws") p.DiffFn = testDiffFn resources := make(map[string]*ResourceState) var expectedState []string for i := 0; i < 13; i++ { key := fmt.Sprintf("aws_instance.foo.%d", i) id := fmt.Sprintf("i-abc%d", i) resources[key] = &ResourceState{ Type: "aws_instance", Primary: &InstanceState{ID: id}, } expectedState = append(expectedState, fmt.Sprintf("%s:\n ID = %s\n", key, id)) } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, State: &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: resources, }, }, }, Targets: []string{"aws_instance.foo[1]"}, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) sort.Strings(expectedState) expected := strings.TrimSpace(` DIFF: STATE: aws_instance.foo.0: ID = i-abc0 aws_instance.foo.1: ID = i-abc1 aws_instance.foo.10: ID = i-abc10 aws_instance.foo.11: ID = i-abc11 aws_instance.foo.12: ID = i-abc12 aws_instance.foo.2: ID = i-abc2 aws_instance.foo.3: ID = i-abc3 aws_instance.foo.4: ID = i-abc4 aws_instance.foo.5: ID = i-abc5 aws_instance.foo.6: ID = i-abc6 aws_instance.foo.7: ID = i-abc7 aws_instance.foo.8: ID = i-abc8 aws_instance.foo.9: ID = i-abc9 `) if actual != expected { t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) } } func TestContext2Plan_provider(t *testing.T) { m := testModule(t, "plan-provider") p := testProvider("aws") p.DiffFn = testDiffFn var value interface{} p.ConfigureFn = func(c *ResourceConfig) error { value, _ = c.Get("foo") return nil } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, Variables: map[string]interface{}{ "foo": "bar", }, }) if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } if value != "bar" { t.Fatalf("bad: %#v", value) } } func TestContext2Plan_varListErr(t *testing.T) { m := testModule(t, "plan-var-list-err") p := testProvider("aws") ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) _, err := ctx.Plan() if err == nil { t.Fatal("should error") } } func TestContext2Plan_ignoreChanges(t *testing.T) { m := testModule(t, "plan-ignore-changes") p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Primary: &InstanceState{ ID: "bar", Attributes: map[string]string{"ami": "ami-abcd1234"}, }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, Variables: map[string]interface{}{ "foo": "ami-1234abcd", }, State: s, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if len(plan.Diff.RootModule().Resources) < 1 { t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanIgnoreChangesStr) if actual != expected { t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected) } } func TestContext2Plan_ignoreChangesWildcard(t *testing.T) { m := testModule(t, "plan-ignore-changes-wildcard") 
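	// The fixture presumably declares ignore_changes = ["*"] (hence the test
	// name); with both ami and instance_type changed via variables, the
	// expected diff below is empty.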
p := testProvider("aws") p.DiffFn = testDiffFn s := &State{ Modules: []*ModuleState{ &ModuleState{ Path: rootModulePath, Resources: map[string]*ResourceState{ "aws_instance.foo": &ResourceState{ Primary: &InstanceState{ ID: "bar", Attributes: map[string]string{ "ami": "ami-abcd1234", "instance_type": "t2.micro", }, }, }, }, }, }, } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, Variables: map[string]interface{}{ "foo": "ami-1234abcd", "bar": "t2.small", }, State: s, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if len(plan.Diff.RootModule().Resources) > 0 { t.Fatalf("bad: %#v", plan.Diff.RootModule().Resources) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanIgnoreChangesWildcardStr) if actual != expected { t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected) } } func TestContext2Plan_moduleMapLiteral(t *testing.T) { m := testModule(t, "plan-module-map-literal") p := testProvider("aws") p.ApplyFn = testApplyFn p.DiffFn = func(i *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) { // Here we verify that both the populated and empty map literals made it // through to the resource attributes val, _ := c.Get("tags") m, ok := val.(map[string]interface{}) if !ok { t.Fatalf("Tags attr not map: %#v", val) } if m["foo"] != "bar" { t.Fatalf("Bad value in tags attr: %#v", m) } { val, _ := c.Get("meta") m, ok := val.(map[string]interface{}) if !ok { t.Fatalf("Meta attr not map: %#v", val) } if len(m) != 0 { t.Fatalf("Meta attr not empty: %#v", val) } } return nil, nil } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } } func TestContext2Plan_computedValueInMap(t *testing.T) { m := testModule(t, "plan-computed-value-in-map") p := testProvider("aws") p.DiffFn = func(info *InstanceInfo, state *InstanceState, c *ResourceConfig) (*InstanceDiff, error) { switch info.Type { case "aws_computed_source": return &InstanceDiff{ Attributes: map[string]*ResourceAttrDiff{ "computed_read_only": &ResourceAttrDiff{ NewComputed: true, }, }, }, nil } return testDiffFn(info, state, c) } ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanComputedValueInMap) if actual != expected { t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected) } } func TestContext2Plan_moduleVariableFromSplat(t *testing.T) { m := testModule(t, "plan-module-variable-from-splat") p := testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanModuleVariableFromSplat) if actual != expected { t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected) } } func TestContext2Plan_createBeforeDestroy_depends_datasource(t *testing.T) { m := testModule(t, "plan-cdb-depends-datasource") p := 
testProvider("aws") p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } if got := len(plan.Diff.Modules); got != 1 { t.Fatalf("got %d modules; want 1", got) } moduleDiff := plan.Diff.Modules[0] if _, ok := moduleDiff.Resources["aws_instance.foo.0"]; !ok { t.Fatalf("missing diff for aws_instance.foo.0") } if _, ok := moduleDiff.Resources["aws_instance.foo.1"]; !ok { t.Fatalf("missing diff for aws_instance.foo.1") } if _, ok := moduleDiff.Resources["data.aws_vpc.bar.0"]; !ok { t.Fatalf("missing diff for data.aws_vpc.bar.0") } if _, ok := moduleDiff.Resources["data.aws_vpc.bar.1"]; !ok { t.Fatalf("missing diff for data.aws_vpc.bar.1") } } // interpolated lists need to be stored in the original order. func TestContext2Plan_listOrder(t *testing.T) { m := testModule(t, "plan-list-order") p := testProvider("aws") p.ApplyFn = testApplyFn p.DiffFn = testDiffFn ctx := testContext2(t, &ContextOpts{ Module: m, Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, }) plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } rDiffs := plan.Diff.Modules[0].Resources rDiffA := rDiffs["aws_instance.a"] rDiffB := rDiffs["aws_instance.b"] if !rDiffA.Equal(rDiffB) { t.Fatal("aws_instance.a and aws_instance.b diffs should match:\n", plan) } }<|fim▁end|>
m := testModule(t, "plan-orphan")
<|file_name|>api.go<|end_file_name|><|fim▁begin|>// Copyright 2015 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. // Package v1 Gitea API. // // This documentation describes the Gitea API. // // Schemes: http, https // BasePath: /api/v1 // Version: 1.1.1 // License: MIT http://opensource.org/licenses/MIT // // Consumes: // - application/json // - text/plain // // Produces: // - application/json // - text/html // // Security: // - BasicAuth : // - Token : // - AccessToken : // - AuthorizationHeaderToken : // // SecurityDefinitions: // BasicAuth: // type: basic // Token: // type: apiKey // name: token // in: query // AccessToken: // type: apiKey // name: access_token // in: query // AuthorizationHeaderToken: // type: apiKey // name: Authorization // in: header // // swagger:meta package v1 import ( "strings" "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/auth" "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/routers/api/v1/admin" "code.gitea.io/gitea/routers/api/v1/misc" "code.gitea.io/gitea/routers/api/v1/org" "code.gitea.io/gitea/routers/api/v1/repo" _ "code.gitea.io/gitea/routers/api/v1/swagger" // for swagger generation "code.gitea.io/gitea/routers/api/v1/user" "code.gitea.io/gitea/routers/api/v1/utils" api "code.gitea.io/sdk/gitea" "github.com/go-macaron/binding" "gopkg.in/macaron.v1" ) func repoAssignment() macaron.Handler { return func(ctx *context.APIContext) { userName := ctx.Params(":username") repoName := ctx.Params(":reponame") var ( owner *models.User err error ) // Check if the user is the same as the repository owner. if ctx.IsSigned && ctx.User.LowerName == strings.ToLower(userName) { owner = ctx.User } else { owner, err = models.GetUserByName(userName) if err != nil { if models.IsErrUserNotExist(err) { ctx.Status(404) } else { ctx.Error(500, "GetUserByName", err) } return } } ctx.Repo.Owner = owner // Get repository. repo, err := models.GetRepositoryByName(owner.ID, repoName) if err != nil { if models.IsErrRepoNotExist(err) { redirectRepoID, err := models.LookupRepoRedirect(owner.ID, repoName) if err == nil { context.RedirectToRepo(ctx.Context, redirectRepoID) } else if models.IsErrRepoRedirectNotExist(err) { ctx.Status(404) } else { ctx.Error(500, "LookupRepoRedirect", err) } } else { ctx.Error(500, "GetRepositoryByName", err) } return } repo.Owner = owner if ctx.IsSigned && ctx.User.IsAdmin { ctx.Repo.AccessMode = models.AccessModeOwner } else { mode, err := models.AccessLevel(utils.UserID(ctx), repo) if err != nil { ctx.Error(500, "AccessLevel", err) return } ctx.Repo.AccessMode = mode } if !ctx.Repo.HasAccess() { ctx.Status(404) return } ctx.Repo.Repository = repo } } // Contexter middleware already checks token for user sign in process. 
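// reqToken and the helpers that follow therefore only need to check flags
// already populated on the context (IsSigned, IsBasicAuth, IsAdmin) rather
// than re-validating credentials themselves.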
func reqToken() macaron.Handler { return func(ctx *context.Context) { if !ctx.IsSigned { ctx.Error(401) return } } } func reqBasicAuth() macaron.Handler { return func(ctx *context.Context) { if !ctx.IsBasicAuth { ctx.Error(401) return } } } func reqAdmin() macaron.Handler { return func(ctx *context.Context) { if !ctx.IsSigned || !ctx.User.IsAdmin { ctx.Error(403) return } } } func reqRepoWriter() macaron.Handler { return func(ctx *context.Context) { if !ctx.Repo.IsWriter() { ctx.Error(403) return } } } func reqOrgMembership() macaron.Handler { return func(ctx *context.APIContext) { var orgID int64 if ctx.Org.Organization != nil { orgID = ctx.Org.Organization.ID } else if ctx.Org.Team != nil { orgID = ctx.Org.Team.OrgID } else { ctx.Error(500, "", "reqOrgMembership: unprepared context") return } if isMember, err := models.IsOrganizationMember(orgID, ctx.User.ID); err != nil { ctx.Error(500, "IsOrganizationMember", err) return } else if !isMember { if ctx.Org.Organization != nil { ctx.Error(403, "", "Must be an organization member") } else { ctx.Status(404) } return } } } func reqOrgOwnership() macaron.Handler { return func(ctx *context.APIContext) { var orgID int64 if ctx.Org.Organization != nil { orgID = ctx.Org.Organization.ID } else if ctx.Org.Team != nil { orgID = ctx.Org.Team.OrgID } else { ctx.Error(500, "", "reqOrgOwnership: unprepared context") return } isOwner, err := models.IsOrganizationOwner(orgID, ctx.User.ID) if err != nil { ctx.Error(500, "IsOrganizationOwner", err) } else if !isOwner { if ctx.Org.Organization != nil { ctx.Error(403, "", "Must be an organization owner") } else { ctx.Status(404) } return } } } func orgAssignment(args ...bool) macaron.Handler { var ( assignOrg bool assignTeam bool ) if len(args) > 0 { assignOrg = args[0] } if len(args) > 1 { assignTeam = args[1] } return func(ctx *context.APIContext) { ctx.Org = new(context.APIOrganization) var err error if assignOrg { ctx.Org.Organization, err = models.GetOrgByName(ctx.Params(":orgname")) if err != nil { if models.IsErrOrgNotExist(err) { ctx.Status(404) } else { ctx.Error(500, "GetOrgByName", err) } return } } if assignTeam { ctx.Org.Team, err = models.GetTeamByID(ctx.ParamsInt64(":teamid")) if err != nil { if models.IsErrUserNotExist(err) { ctx.Status(404) } else { ctx.Error(500, "GetTeamById", err) } return } } } } func mustEnableIssues(ctx *context.APIContext) { if !ctx.Repo.Repository.UnitEnabled(models.UnitTypeIssues) { ctx.Status(404) return } } func mustAllowPulls(ctx *context.Context) { if !ctx.Repo.Repository.AllowsPulls() { ctx.Status(404) return } } // RegisterRoutes registers all v1 APIs routes to web application. // FIXME: custom form error response func RegisterRoutes(m *macaron.Macaron) { bind := binding.Bind if setting.API.EnableSwaggerEndpoint { m.Get("/swagger", misc.Swagger) //Render V1 by default } m.Group("/v1", func() { // Miscellaneous if setting.API.EnableSwaggerEndpoint { m.Get("/swagger", misc.Swagger) } m.Get("/version", misc.Version) m.Post("/markdown", bind(api.MarkdownOption{}), misc.Markdown) m.Post("/markdown/raw", misc.MarkdownRaw) // Users m.Group("/users", func() { m.Get("/search", user.Search) m.Group("/:username", func() { m.Get("", user.GetInfo) m.Get("/repos", user.ListUserRepos) m.Group("/tokens", func() { m.Combo("").Get(user.ListAccessTokens). 
Post(bind(api.CreateAccessTokenOption{}), user.CreateAccessToken) m.Combo("/:id").Delete(user.DeleteAccessToken) }, reqBasicAuth()) }) }) m.Group("/users", func() { m.Group("/:username", func() { m.Get("/keys", user.ListPublicKeys) m.Get("/gpg_keys", user.ListGPGKeys) m.Get("/followers", user.ListFollowers) m.Group("/following", func() { m.Get("", user.ListFollowing) m.Get("/:target", user.CheckFollowing) }) m.Get("/starred", user.GetStarredRepos) m.Get("/subscriptions", user.GetWatchedRepos) }) }, reqToken()) m.Group("/user", func() { m.Get("", user.GetAuthenticatedUser) m.Combo("/emails").Get(user.ListEmails). Post(bind(api.CreateEmailOption{}), user.AddEmail). Delete(bind(api.DeleteEmailOption{}), user.DeleteEmail) m.Get("/followers", user.ListMyFollowers) m.Group("/following", func() { m.Get("", user.ListMyFollowing) m.Combo("/:username").Get(user.CheckMyFollowing).Put(user.Follow).Delete(user.Unfollow) }) m.Group("/keys", func() { m.Combo("").Get(user.ListMyPublicKeys). Post(bind(api.CreateKeyOption{}), user.CreatePublicKey) m.Combo("/:id").Get(user.GetPublicKey). Delete(user.DeletePublicKey) }) m.Group("/gpg_keys", func() { m.Combo("").Get(user.ListMyGPGKeys). Post(bind(api.CreateGPGKeyOption{}), user.CreateGPGKey) m.Combo("/:id").Get(user.GetGPGKey). Delete(user.DeleteGPGKey) }) m.Combo("/repos").Get(user.ListMyRepos). Post(bind(api.CreateRepoOption{}), repo.Create) m.Group("/starred", func() { m.Get("", user.GetMyStarredRepos) m.Group("/:username/:reponame", func() {<|fim▁hole|> m.Put("", user.Star) m.Delete("", user.Unstar) }, repoAssignment()) }) m.Get("/times", repo.ListMyTrackedTimes) m.Get("/subscriptions", user.GetMyWatchedRepos) }, reqToken()) // Repositories m.Post("/org/:org/repos", reqToken(), bind(api.CreateRepoOption{}), repo.CreateOrgRepo) m.Group("/repos", func() { m.Get("/search", repo.Search) }) m.Combo("/repositories/:id", reqToken()).Get(repo.GetByID) m.Group("/repos", func() { m.Post("/migrate", reqToken(), bind(auth.MigrateRepoForm{}), repo.Migrate) m.Group("/:username/:reponame", func() { m.Combo("").Get(repo.Get).Delete(reqToken(), repo.Delete) m.Group("/hooks", func() { m.Combo("").Get(repo.ListHooks). Post(bind(api.CreateHookOption{}), repo.CreateHook) m.Group("/:id", func() { m.Combo("").Get(repo.GetHook). Patch(bind(api.EditHookOption{}), repo.EditHook). Delete(repo.DeleteHook) m.Post("/tests", context.RepoRef(), repo.TestHook) }) }, reqToken(), reqRepoWriter()) m.Group("/collaborators", func() { m.Get("", repo.ListCollaborators) m.Combo("/:collaborator").Get(repo.IsCollaborator). Put(bind(api.AddCollaboratorOption{}), repo.AddCollaborator). Delete(repo.DeleteCollaborator) }, reqToken()) m.Get("/raw/*", context.RepoRefByType(context.RepoRefAny), repo.GetRawFile) m.Get("/archive/*", repo.GetArchive) m.Combo("/forks").Get(repo.ListForks). Post(reqToken(), bind(api.CreateForkOption{}), repo.CreateFork) m.Group("/branches", func() { m.Get("", repo.ListBranches) m.Get("/*", context.RepoRefByType(context.RepoRefBranch), repo.GetBranch) }) m.Group("/keys", func() { m.Combo("").Get(repo.ListDeployKeys). Post(bind(api.CreateKeyOption{}), repo.CreateDeployKey) m.Combo("/:id").Get(repo.GetDeployKey). Delete(repo.DeleteDeploykey) }, reqToken(), reqRepoWriter()) m.Group("/times", func() { m.Combo("").Get(repo.ListTrackedTimesByRepository) m.Combo("/:timetrackingusername").Get(repo.ListTrackedTimesByUser) }, mustEnableIssues) m.Group("/issues", func() { m.Combo("").Get(repo.ListIssues). 
Post(reqToken(), bind(api.CreateIssueOption{}), repo.CreateIssue) m.Group("/comments", func() { m.Get("", repo.ListRepoIssueComments) m.Combo("/:id", reqToken()). Patch(bind(api.EditIssueCommentOption{}), repo.EditIssueComment). Delete(repo.DeleteIssueComment) }) m.Group("/:index", func() { m.Combo("").Get(repo.GetIssue). Patch(reqToken(), bind(api.EditIssueOption{}), repo.EditIssue) m.Group("/comments", func() { m.Combo("").Get(repo.ListIssueComments). Post(reqToken(), bind(api.CreateIssueCommentOption{}), repo.CreateIssueComment) m.Combo("/:id", reqToken()).Patch(bind(api.EditIssueCommentOption{}), repo.EditIssueCommentDeprecated). Delete(repo.DeleteIssueCommentDeprecated) }) m.Group("/labels", func() { m.Combo("").Get(repo.ListIssueLabels). Post(reqToken(), bind(api.IssueLabelsOption{}), repo.AddIssueLabels). Put(reqToken(), bind(api.IssueLabelsOption{}), repo.ReplaceIssueLabels). Delete(reqToken(), repo.ClearIssueLabels) m.Delete("/:id", reqToken(), repo.DeleteIssueLabel) }) m.Group("/times", func() { m.Combo("").Get(repo.ListTrackedTimes). Post(reqToken(), bind(api.AddTimeOption{}), repo.AddTime) }) }) }, mustEnableIssues) m.Group("/labels", func() { m.Combo("").Get(repo.ListLabels). Post(reqToken(), bind(api.CreateLabelOption{}), repo.CreateLabel) m.Combo("/:id").Get(repo.GetLabel). Patch(reqToken(), bind(api.EditLabelOption{}), repo.EditLabel). Delete(reqToken(), repo.DeleteLabel) }) m.Group("/milestones", func() { m.Combo("").Get(repo.ListMilestones). Post(reqToken(), reqRepoWriter(), bind(api.CreateMilestoneOption{}), repo.CreateMilestone) m.Combo("/:id").Get(repo.GetMilestone). Patch(reqToken(), reqRepoWriter(), bind(api.EditMilestoneOption{}), repo.EditMilestone). Delete(reqToken(), reqRepoWriter(), repo.DeleteMilestone) }) m.Get("/stargazers", repo.ListStargazers) m.Get("/subscribers", repo.ListSubscribers) m.Group("/subscription", func() { m.Get("", user.IsWatching) m.Put("", reqToken(), user.Watch) m.Delete("", reqToken(), user.Unwatch) }) m.Group("/releases", func() { m.Combo("").Get(repo.ListReleases). Post(reqToken(), reqRepoWriter(), context.ReferencesGitRepo(), bind(api.CreateReleaseOption{}), repo.CreateRelease) m.Group("/:id", func() { m.Combo("").Get(repo.GetRelease). Patch(reqToken(), reqRepoWriter(), context.ReferencesGitRepo(), bind(api.EditReleaseOption{}), repo.EditRelease). Delete(reqToken(), reqRepoWriter(), repo.DeleteRelease) m.Group("/assets", func() { m.Combo("").Get(repo.ListReleaseAttachments). Post(reqToken(), reqRepoWriter(), repo.CreateReleaseAttachment) m.Combo("/:asset").Get(repo.GetReleaseAttachment). Patch(reqToken(), reqRepoWriter(), bind(api.EditAttachmentOptions{}), repo.EditReleaseAttachment). Delete(reqToken(), reqRepoWriter(), repo.DeleteReleaseAttachment) }) }) }) m.Post("/mirror-sync", reqToken(), reqRepoWriter(), repo.MirrorSync) m.Get("/editorconfig/:filename", context.RepoRef(), repo.GetEditorconfig) m.Group("/pulls", func() { m.Combo("").Get(bind(api.ListPullRequestsOptions{}), repo.ListPullRequests). Post(reqToken(), reqRepoWriter(), bind(api.CreatePullRequestOption{}), repo.CreatePullRequest) m.Group("/:index", func() { m.Combo("").Get(repo.GetPullRequest). Patch(reqToken(), reqRepoWriter(), bind(api.EditPullRequestOption{}), repo.EditPullRequest) m.Combo("/merge").Get(repo.IsPullRequestMerged). Post(reqToken(), reqRepoWriter(), bind(auth.MergePullRequestForm{}), repo.MergePullRequest) }) }, mustAllowPulls, context.ReferencesGitRepo()) m.Group("/statuses", func() { m.Combo("/:sha").Get(repo.GetCommitStatuses). 
Post(reqToken(), reqRepoWriter(), bind(api.CreateStatusOption{}), repo.NewCommitStatus) }) m.Group("/commits/:ref", func() { m.Get("/status", repo.GetCombinedCommitStatusByRef) m.Get("/statuses", repo.GetCommitStatusesByRef) }) }, repoAssignment()) }) // Organizations m.Get("/user/orgs", reqToken(), org.ListMyOrgs) m.Get("/users/:username/orgs", org.ListUserOrgs) m.Group("/orgs/:orgname", func() { m.Get("/repos", user.ListOrgRepos) m.Combo("").Get(org.Get). Patch(reqToken(), reqOrgOwnership(), bind(api.EditOrgOption{}), org.Edit) m.Group("/members", func() { m.Get("", org.ListMembers) m.Combo("/:username").Get(org.IsMember). Delete(reqToken(), reqOrgOwnership(), org.DeleteMember) }) m.Group("/public_members", func() { m.Get("", org.ListPublicMembers) m.Combo("/:username").Get(org.IsPublicMember). Put(reqToken(), reqOrgMembership(), org.PublicizeMember). Delete(reqToken(), reqOrgMembership(), org.ConcealMember) }) m.Combo("/teams", reqToken(), reqOrgMembership()).Get(org.ListTeams). Post(bind(api.CreateTeamOption{}), org.CreateTeam) m.Group("/hooks", func() { m.Combo("").Get(org.ListHooks). Post(bind(api.CreateHookOption{}), org.CreateHook) m.Combo("/:id").Get(org.GetHook). Patch(reqOrgOwnership(), bind(api.EditHookOption{}), org.EditHook). Delete(reqOrgOwnership(), org.DeleteHook) }, reqToken(), reqOrgMembership()) }, orgAssignment(true)) m.Group("/teams/:teamid", func() { m.Combo("").Get(org.GetTeam). Patch(reqOrgOwnership(), bind(api.EditTeamOption{}), org.EditTeam). Delete(reqOrgOwnership(), org.DeleteTeam) m.Group("/members", func() { m.Get("", org.GetTeamMembers) m.Combo("/:username"). Put(reqOrgOwnership(), org.AddTeamMember). Delete(reqOrgOwnership(), org.RemoveTeamMember) }) m.Group("/repos", func() { m.Get("", org.GetTeamRepos) m.Combo("/:orgname/:reponame"). Put(org.AddTeamRepository). Delete(org.RemoveTeamRepository) }) }, orgAssignment(false, true), reqToken(), reqOrgMembership()) m.Any("/*", func(ctx *context.Context) { ctx.Error(404) }) m.Group("/admin", func() { m.Group("/users", func() { m.Post("", bind(api.CreateUserOption{}), admin.CreateUser) m.Group("/:username", func() { m.Combo("").Patch(bind(api.EditUserOption{}), admin.EditUser). Delete(admin.DeleteUser) m.Group("/keys", func() { m.Post("", bind(api.CreateKeyOption{}), admin.CreatePublicKey) m.Delete("/:id", admin.DeleteUserPublicKey) }) m.Post("/orgs", bind(api.CreateOrgOption{}), admin.CreateOrg) m.Post("/repos", bind(api.CreateRepoOption{}), admin.CreateRepo) }) }) }, reqAdmin()) m.Group("/topics", func() { m.Get("/search", repo.TopicSearch) }) }, context.APIContexter()) }<|fim▁end|>
m.Get("", user.IsStarring)
<|file_name|>yaml.py<|end_file_name|><|fim▁begin|># Copyright 2012 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Most of this code originated in sphinx.domains.python and # sphinx.ext.autodoc and has been only slightly adapted for use in # subclasses here. # :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. # :license: BSD, see LICENSE for details. import re from sphinx import addnodes from sphinx.domains.python import _pseudo_parse_arglist from sphinx.domains.python import PyModulelevel from sphinx.ext.autodoc import Documenter from sphinx.ext.autodoc import FunctionDocumenter from sphinx.locale import _ yaml_sig_re = re.compile(r'yaml:\s*(.*)') class PyYAMLFunction(PyModulelevel): def handle_signature(self, sig, signode): """Transform a Python signature into RST nodes. Return (fully qualified name of the thing, classname if any). If inside a class, the current class name is handled intelligently: * it is stripped from the displayed name if present * it is added to the full name (return value) if not present """ name_prefix = None name = sig arglist = None retann = None # determine module and class name (if applicable), as well as full name modname = self.options.get( 'module', self.env.temp_data.get('py:module')) classname = self.env.temp_data.get('py:class') fullname = name signode['module'] = modname signode['class'] = classname signode['fullname'] = fullname sig_prefix = self.get_signature_prefix(sig) if sig_prefix: signode += addnodes.desc_annotation(sig_prefix, sig_prefix) if name_prefix: signode += addnodes.desc_addname(name_prefix, name_prefix) anno = self.options.get('annotation') signode += addnodes.desc_name(name, name) if not arglist: if self.needs_arglist(): # for callables, add an empty parameter list signode += addnodes.desc_parameterlist() if retann: signode += addnodes.desc_returns(retann, retann) if anno: signode += addnodes.desc_annotation(' ' + anno, ' ' + anno) return fullname, name_prefix _pseudo_parse_arglist(signode, arglist) if retann: signode += addnodes.desc_returns(retann, retann) if anno: signode += addnodes.desc_annotation(' ' + anno, ' ' + anno) return fullname, name_prefix def get_index_text(self, modname, name_cls): return _('%s (in module %s)') % (name_cls[0], modname) class YAMLFunctionDocumenter(FunctionDocumenter): priority = FunctionDocumenter.priority + 10 objtype = 'yamlfunction' directivetype = 'yamlfunction' @classmethod def can_document_member(cls, member, membername, isattr, parent): if not FunctionDocumenter.can_document_member(member, membername, isattr, parent): return False if member.__doc__ is not None and yaml_sig_re.match(member.__doc__): return True return False def _find_signature(self, encoding=None): docstrings = Documenter.get_doc(self, encoding, 2) if len(docstrings) != 1: return doclines = docstrings[0] setattr(self, '__new_doclines', doclines) if not doclines: return # match first line of docstring against signature RE match = yaml_sig_re.match(doclines[0]) if not match:<|fim▁hole|> # 
ok, now jump over remaining empty lines and set the remaining # lines as the new doclines i = 1 while i < len(doclines) and not doclines[i].strip(): i += 1 setattr(self, '__new_doclines', doclines[i:]) return name def get_doc(self, encoding=None, ignore=1): lines = getattr(self, '__new_doclines', None) if lines is not None: return [lines] return Documenter.get_doc(self, encoding, ignore) def format_signature(self): result = self._find_signature() self._name = result return '' def format_name(self): return self._name def setup(app): app.add_autodocumenter(YAMLFunctionDocumenter) app.add_directive_to_domain('py', 'yamlfunction', PyYAMLFunction)<|fim▁end|>
return name = match.group(1)
<|file_name|>test_io.py<|end_file_name|><|fim▁begin|>"""Unit tests for the io module.""" # Tests of io are scattered over the test suite: # * test_bufio - tests file buffering # * test_memoryio - tests BytesIO and StringIO # * test_fileio - tests FileIO # * test_file - tests the file interface # * test_io - tests everything else in the io module # * test_univnewlines - tests universal newline support # * test_largefile - tests operations on a file greater than 2**32 bytes # (only enabled with -ulargefile) ################################################################################ # ATTENTION TEST WRITERS!!! ################################################################################ # When writing tests for io, it's important to test both the C and Python # implementations. This is usually done by writing a base test that refers to # the type it is testing as an attribute. Then it provides custom subclasses to # test both implementations. This file has lots of examples. ################################################################################ from __future__ import print_function from __future__ import unicode_literals import os import sys import time import array import random import unittest import weakref import warnings import abc import signal import errno from itertools import cycle, count from collections import deque from UserList import UserList from test import test_support as support import contextlib import codecs import io # C implementation of io import _pyio as pyio # Python implementation of io try: import threading except ImportError: threading = None try: import fcntl except ImportError: fcntl = None __metaclass__ = type bytes = support.py3k_bytes def _default_chunk_size(): """Get the default TextIOWrapper chunk size""" with io.open(__file__, "r", encoding="latin1") as f: return f._CHUNK_SIZE class MockRawIOWithoutRead: """A RawIO implementation without read(), so as to exercise the default RawIO.read() which calls readinto().""" def __init__(self, read_stack=()): self._read_stack = list(read_stack) self._write_stack = [] self._reads = 0 self._extraneous_reads = 0 def write(self, b): self._write_stack.append(bytes(b)) return len(b) def writable(self): return True def fileno(self): return 42 def readable(self): return True def seekable(self): return True def seek(self, pos, whence): return 0 # wrong but we gotta return something def tell(self): return 0 # same comment as above def readinto(self, buf): self._reads += 1 max_len = len(buf) try: data = self._read_stack[0] except IndexError: self._extraneous_reads += 1 return 0 if data is None: del self._read_stack[0] return None n = len(data) if len(data) <= max_len: del self._read_stack[0] buf[:n] = data return n else: buf[:] = data[:max_len] self._read_stack[0] = data[max_len:] return max_len def truncate(self, pos=None): return pos class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase): pass class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase): pass class MockRawIO(MockRawIOWithoutRead): def read(self, n=None): self._reads += 1 try: return self._read_stack.pop(0) except: self._extraneous_reads += 1 return b"" class CMockRawIO(MockRawIO, io.RawIOBase): pass class PyMockRawIO(MockRawIO, pyio.RawIOBase): pass class MisbehavedRawIO(MockRawIO): def write(self, b): return MockRawIO.write(self, b) * 2 def read(self, n=None): return MockRawIO.read(self, n) * 2 def seek(self, pos, whence): return -123 def tell(self): return -456 def readinto(self, buf): MockRawIO.readinto(self, buf) return 
len(buf) * 5 class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase): pass class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase): pass class CloseFailureIO(MockRawIO): closed = 0 def close(self): if not self.closed: self.closed = 1 raise IOError class CCloseFailureIO(CloseFailureIO, io.RawIOBase): pass class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase): pass class MockFileIO: def __init__(self, data): self.read_history = [] super(MockFileIO, self).__init__(data) def read(self, n=None): res = super(MockFileIO, self).read(n) self.read_history.append(None if res is None else len(res)) return res def readinto(self, b): res = super(MockFileIO, self).readinto(b) self.read_history.append(res) return res class CMockFileIO(MockFileIO, io.BytesIO): pass class PyMockFileIO(MockFileIO, pyio.BytesIO): pass class MockNonBlockWriterIO: def __init__(self): self._write_stack = [] self._blocker_char = None def pop_written(self): s = b"".join(self._write_stack) self._write_stack[:] = [] return s def block_on(self, char): """Block when a given char is encountered.""" self._blocker_char = char def readable(self): return True def seekable(self): return True def writable(self): return True def write(self, b): b = bytes(b) n = -1 if self._blocker_char: try: n = b.index(self._blocker_char) except ValueError: pass else: if n > 0: # write data up to the first blocker self._write_stack.append(b[:n]) return n else: # cancel blocker and indicate would block self._blocker_char = None return None self._write_stack.append(b) return len(b) class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase): BlockingIOError = io.BlockingIOError class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase): BlockingIOError = pyio.BlockingIOError class IOTest(unittest.TestCase): def setUp(self): support.unlink(support.TESTFN) def tearDown(self): support.unlink(support.TESTFN) def write_ops(self, f): self.assertEqual(f.write(b"blah."), 5) f.truncate(0) self.assertEqual(f.tell(), 5) f.seek(0) self.assertEqual(f.write(b"blah."), 5) self.assertEqual(f.seek(0), 0) self.assertEqual(f.write(b"Hello."), 6) self.assertEqual(f.tell(), 6) self.assertEqual(f.seek(-1, 1), 5) self.assertEqual(f.tell(), 5) self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9) self.assertEqual(f.seek(0), 0) self.assertEqual(f.write(b"h"), 1) self.assertEqual(f.seek(-1, 2), 13) self.assertEqual(f.tell(), 13) self.assertEqual(f.truncate(12), 12) self.assertEqual(f.tell(), 13) self.assertRaises(TypeError, f.seek, 0.0) def read_ops(self, f, buffered=False): data = f.read(5) self.assertEqual(data, b"hello") data = bytearray(data) self.assertEqual(f.readinto(data), 5) self.assertEqual(data, b" worl") self.assertEqual(f.readinto(data), 2) self.assertEqual(len(data), 5) self.assertEqual(data[:2], b"d\n") self.assertEqual(f.seek(0), 0) self.assertEqual(f.read(20), b"hello world\n") self.assertEqual(f.read(1), b"") self.assertEqual(f.readinto(bytearray(b"x")), 0) self.assertEqual(f.seek(-6, 2), 6) self.assertEqual(f.read(5), b"world") self.assertEqual(f.read(0), b"") self.assertEqual(f.readinto(bytearray()), 0) self.assertEqual(f.seek(-6, 1), 5) self.assertEqual(f.read(5), b" worl") self.assertEqual(f.tell(), 10) self.assertRaises(TypeError, f.seek, 0.0) if buffered: f.seek(0) self.assertEqual(f.read(), b"hello world\n") f.seek(6) self.assertEqual(f.read(), b"world\n") self.assertEqual(f.read(), b"") LARGE = 2**31 def large_file_ops(self, f): assert f.readable() assert f.writable() self.assertEqual(f.seek(self.LARGE), self.LARGE) self.assertEqual(f.tell(), 
self.LARGE) self.assertEqual(f.write(b"xxx"), 3) self.assertEqual(f.tell(), self.LARGE + 3) self.assertEqual(f.seek(-1, 1), self.LARGE + 2) self.assertEqual(f.truncate(), self.LARGE + 2) self.assertEqual(f.tell(), self.LARGE + 2) self.assertEqual(f.seek(0, 2), self.LARGE + 2) self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1) self.assertEqual(f.tell(), self.LARGE + 2) self.assertEqual(f.seek(0, 2), self.LARGE + 1) self.assertEqual(f.seek(-1, 2), self.LARGE) self.assertEqual(f.read(2), b"x") def test_invalid_operations(self): # Try writing on a file opened in read mode and vice-versa. for mode in ("w", "wb"): with self.open(support.TESTFN, mode) as fp: self.assertRaises(IOError, fp.read) self.assertRaises(IOError, fp.readline) with self.open(support.TESTFN, "rb") as fp: self.assertRaises(IOError, fp.write, b"blah") self.assertRaises(IOError, fp.writelines, [b"blah\n"]) with self.open(support.TESTFN, "r") as fp: self.assertRaises(IOError, fp.write, "blah") self.assertRaises(IOError, fp.writelines, ["blah\n"]) def test_raw_file_io(self): with self.open(support.TESTFN, "wb", buffering=0) as f: self.assertEqual(f.readable(), False) self.assertEqual(f.writable(), True) self.assertEqual(f.seekable(), True) self.write_ops(f) with self.open(support.TESTFN, "rb", buffering=0) as f: self.assertEqual(f.readable(), True) self.assertEqual(f.writable(), False) self.assertEqual(f.seekable(), True) self.read_ops(f) def test_buffered_file_io(self): with self.open(support.TESTFN, "wb") as f: self.assertEqual(f.readable(), False) self.assertEqual(f.writable(), True) self.assertEqual(f.seekable(), True) self.write_ops(f) with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.readable(), True) self.assertEqual(f.writable(), False) self.assertEqual(f.seekable(), True) self.read_ops(f, True) def test_readline(self): with self.open(support.TESTFN, "wb") as f: f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line") with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.readline(), b"abc\n") self.assertEqual(f.readline(10), b"def\n") self.assertEqual(f.readline(2), b"xy") self.assertEqual(f.readline(4), b"zzy\n") self.assertEqual(f.readline(), b"foo\x00bar\n") self.assertEqual(f.readline(None), b"another line") self.assertRaises(TypeError, f.readline, 5.3) with self.open(support.TESTFN, "r") as f: self.assertRaises(TypeError, f.readline, 5.3) def test_raw_bytes_io(self): f = self.BytesIO() self.write_ops(f) data = f.getvalue() self.assertEqual(data, b"hello world\n") f = self.BytesIO(data) self.read_ops(f, True) def test_large_file_ops(self): # On Windows and Mac OSX this test consumes large resources; it takes # a long time to build the >2GB file and takes >2GB of disk space # therefore the resource must be enabled to run this test. 
if sys.platform[:3] == 'win' or sys.platform == 'darwin': support.requires( 'largefile', 'test requires %s bytes and a long time to run' % self.LARGE) with self.open(support.TESTFN, "w+b", 0) as f: self.large_file_ops(f) with self.open(support.TESTFN, "w+b") as f: self.large_file_ops(f) def test_with_open(self): for bufsize in (0, 1, 100): f = None with self.open(support.TESTFN, "wb", bufsize) as f: f.write(b"xxx") self.assertEqual(f.closed, True) f = None try: with self.open(support.TESTFN, "wb", bufsize) as f: 1 // 0 except ZeroDivisionError: self.assertEqual(f.closed, True) else: self.fail("1 // 0 didn't raise an exception") # issue 5008 def test_append_mode_tell(self): with self.open(support.TESTFN, "wb") as f: f.write(b"xxx") with self.open(support.TESTFN, "ab", buffering=0) as f: self.assertEqual(f.tell(), 3) with self.open(support.TESTFN, "ab") as f: self.assertEqual(f.tell(), 3) with self.open(support.TESTFN, "a") as f: self.assertTrue(f.tell() > 0) def test_destructor(self): record = [] class MyFileIO(self.FileIO): def __del__(self): record.append(1) try: f = super(MyFileIO, self).__del__ except AttributeError: pass else: f() def close(self): record.append(2) super(MyFileIO, self).close() def flush(self): record.append(3) super(MyFileIO, self).flush() f = MyFileIO(support.TESTFN, "wb") f.write(b"xxx") del f support.gc_collect() self.assertEqual(record, [1, 2, 3]) with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"xxx") def _check_base_destructor(self, base): record = [] class MyIO(base): def __init__(self): # This exercises the availability of attributes on object # destruction. # (in the C version, close() is called by the tp_dealloc # function, not by __del__) self.on_del = 1 self.on_close = 2 self.on_flush = 3 def __del__(self): record.append(self.on_del) try: f = super(MyIO, self).__del__ except AttributeError: pass else: f() def close(self): record.append(self.on_close) super(MyIO, self).close() def flush(self): record.append(self.on_flush) super(MyIO, self).flush() f = MyIO() del f support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_IOBase_destructor(self): self._check_base_destructor(self.IOBase) def test_RawIOBase_destructor(self): self._check_base_destructor(self.RawIOBase) def test_BufferedIOBase_destructor(self): self._check_base_destructor(self.BufferedIOBase) def test_TextIOBase_destructor(self): self._check_base_destructor(self.TextIOBase) def test_close_flushes(self): with self.open(support.TESTFN, "wb") as f: f.write(b"xxx") with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"xxx") def test_array_writes(self): a = array.array(b'i', range(10)) n = len(a.tostring()) with self.open(support.TESTFN, "wb", 0) as f: self.assertEqual(f.write(a), n) with self.open(support.TESTFN, "wb") as f: self.assertEqual(f.write(a), n) def test_closefd(self): self.assertRaises(ValueError, self.open, support.TESTFN, 'w', closefd=False) def test_read_closed(self): with self.open(support.TESTFN, "w") as f: f.write("egg\n") with self.open(support.TESTFN, "r") as f: file = self.open(f.fileno(), "r", closefd=False) self.assertEqual(file.read(), "egg\n") file.seek(0) file.close() self.assertRaises(ValueError, file.read) def test_no_closefd_with_filename(self): # can't use closefd in combination with a file name self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False) def test_closefd_attr(self): with self.open(support.TESTFN, "wb") as f: f.write(b"egg\n") with self.open(support.TESTFN, "r") as f: 
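            # buffer.raw.closefd reports whether closing this file object
            # also closes the underlying descriptor; the wrapper opened
            # below with closefd=False should report False.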
self.assertEqual(f.buffer.raw.closefd, True) file = self.open(f.fileno(), "r", closefd=False) self.assertEqual(file.buffer.raw.closefd, False) def test_garbage_collection(self): # FileIO objects are collected, and collecting them flushes # all data to disk. f = self.FileIO(support.TESTFN, "wb") f.write(b"abcxxx") f.f = f wr = weakref.ref(f) del f support.gc_collect() self.assertTrue(wr() is None, wr) with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"abcxxx") def test_unbounded_file(self): # Issue #1174606: reading from an unbounded stream such as /dev/zero. zero = "/dev/zero" if not os.path.exists(zero): self.skipTest("{0} does not exist".format(zero)) if sys.maxsize > 0x7FFFFFFF: self.skipTest("test can only run in a 32-bit address space") if support.real_max_memuse < support._2G: self.skipTest("test requires at least 2GB of memory") with self.open(zero, "rb", buffering=0) as f: self.assertRaises(OverflowError, f.read) with self.open(zero, "rb") as f: self.assertRaises(OverflowError, f.read) with self.open(zero, "r") as f: self.assertRaises(OverflowError, f.read) def check_flush_error_on_close(self, *args, **kwargs): # Test that the file is closed despite failed flush # and that flush() is called before file closed. f = self.open(*args, **kwargs) closed = [] def bad_flush(): closed[:] = [f.closed] raise IOError() f.flush = bad_flush self.assertRaises(IOError, f.close) # exception not swallowed self.assertTrue(f.closed) self.assertTrue(closed) # flush() called self.assertFalse(closed[0]) # flush() called before file closed f.flush = lambda: None # break reference loop def test_flush_error_on_close(self): # raw file # Issue #5700: io.FileIO calls flush() after file closed self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0) fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', buffering=0) fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False) os.close(fd) # buffered io self.check_flush_error_on_close(support.TESTFN, 'wb') fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb') fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', closefd=False) os.close(fd) # text io self.check_flush_error_on_close(support.TESTFN, 'w') fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'w') fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'w', closefd=False) os.close(fd) def test_multi_close(self): f = self.open(support.TESTFN, "wb", buffering=0) f.close() f.close() f.close() self.assertRaises(ValueError, f.flush) def test_RawIOBase_read(self): # Exercise the default RawIOBase.read() implementation (which calls # readinto() internally). 
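        # None entries in the read stack simulate a raw stream that would
        # block: readinto() returns None, and the default read() must pass
        # that through to the caller, as asserted below.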
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None)) self.assertEqual(rawio.read(2), b"ab") self.assertEqual(rawio.read(2), b"c") self.assertEqual(rawio.read(2), b"d") self.assertEqual(rawio.read(2), None) self.assertEqual(rawio.read(2), b"ef") self.assertEqual(rawio.read(2), b"g") self.assertEqual(rawio.read(2), None) self.assertEqual(rawio.read(2), b"") def test_fileio_closefd(self): # Issue #4841 with self.open(__file__, 'rb') as f1, \ self.open(__file__, 'rb') as f2: fileio = self.FileIO(f1.fileno(), closefd=False) # .__init__() must not close f1 fileio.__init__(f2.fileno(), closefd=False) f1.readline() # .close() must not close f2 fileio.close() f2.readline() def test_nonbuffered_textio(self): with warnings.catch_warnings(record=True) as recorded: with self.assertRaises(ValueError): self.open(support.TESTFN, 'w', buffering=0) support.gc_collect() self.assertEqual(recorded, []) def test_invalid_newline(self): with warnings.catch_warnings(record=True) as recorded: with self.assertRaises(ValueError): self.open(support.TESTFN, 'w', newline='invalid') support.gc_collect() self.assertEqual(recorded, []) class CIOTest(IOTest): def test_IOBase_finalize(self): # Issue #12149: segmentation fault on _PyIOBase_finalize when both a # class which inherits IOBase and an object of this class are caught # in a reference cycle and close() is already in the method cache. class MyIO(self.IOBase): def close(self): pass # create an instance to populate the method cache MyIO() obj = MyIO() obj.obj = obj wr = weakref.ref(obj) del MyIO del obj support.gc_collect() self.assertTrue(wr() is None, wr) class PyIOTest(IOTest): test_array_writes = unittest.skip( "len(array.array) returns number of elements rather than bytelength" )(IOTest.test_array_writes) class CommonBufferedTests: # Tests common to BufferedReader, BufferedWriter and BufferedRandom def test_detach(self): raw = self.MockRawIO() buf = self.tp(raw) self.assertIs(buf.detach(), raw) self.assertRaises(ValueError, buf.detach) repr(buf) # Should still work def test_fileno(self): rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertEqual(42, bufio.fileno()) @unittest.skip('test having existential crisis') def test_no_fileno(self): # XXX will we always have fileno() function? If so, kill # this test. Else, write it. pass def test_invalid_args(self): rawio = self.MockRawIO() bufio = self.tp(rawio) # Invalid whence self.assertRaises(ValueError, bufio.seek, 0, -1) self.assertRaises(ValueError, bufio.seek, 0, 3) def test_override_destructor(self): tp = self.tp record = [] class MyBufferedIO(tp): def __del__(self): record.append(1) try: f = super(MyBufferedIO, self).__del__ except AttributeError: pass else: f() def close(self): record.append(2) super(MyBufferedIO, self).close() def flush(self): record.append(3) super(MyBufferedIO, self).flush() rawio = self.MockRawIO() bufio = MyBufferedIO(rawio) writable = bufio.writable() del bufio support.gc_collect() if writable: self.assertEqual(record, [1, 2, 3]) else: self.assertEqual(record, [1, 2]) def test_context_manager(self): # Test usability as a context manager rawio = self.MockRawIO() bufio = self.tp(rawio) def _with(): with bufio: pass _with() # bufio should now be closed, and using it a second time should raise # a ValueError. self.assertRaises(ValueError, _with) def test_error_through_destructor(self): # Test that the exception state is not modified by a destructor, # even if close() fails. 
rawio = self.CloseFailureIO() def f(): self.tp(rawio).xyzzy with support.captured_output("stderr") as s: self.assertRaises(AttributeError, f) s = s.getvalue().strip() if s: # The destructor *may* have printed an unraisable error, check it self.assertEqual(len(s.splitlines()), 1) self.assertTrue(s.startswith("Exception IOError: "), s) self.assertTrue(s.endswith(" ignored"), s) def test_repr(self): raw = self.MockRawIO() b = self.tp(raw) clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__) self.assertEqual(repr(b), "<%s>" % clsname) raw.name = "dummy" self.assertEqual(repr(b), "<%s name=u'dummy'>" % clsname) raw.name = b"dummy" self.assertEqual(repr(b), "<%s name='dummy'>" % clsname) def test_flush_error_on_close(self): # Test that buffered file is closed despite failed flush # and that flush() is called before file closed. raw = self.MockRawIO() closed = [] def bad_flush(): closed[:] = [b.closed, raw.closed] raise IOError() raw.flush = bad_flush b = self.tp(raw) self.assertRaises(IOError, b.close) # exception not swallowed self.assertTrue(b.closed) self.assertTrue(raw.closed) self.assertTrue(closed) # flush() called self.assertFalse(closed[0]) # flush() called before file closed self.assertFalse(closed[1]) raw.flush = lambda: None # break reference loop def test_close_error_on_close(self): raw = self.MockRawIO() def bad_flush(): raise IOError('flush') def bad_close(): raise IOError('close') raw.close = bad_close b = self.tp(raw) b.flush = bad_flush with self.assertRaises(IOError) as err: # exception not swallowed b.close() self.assertEqual(err.exception.args, ('close',)) self.assertFalse(b.closed) def test_multi_close(self): raw = self.MockRawIO() b = self.tp(raw) b.close() b.close() b.close() self.assertRaises(ValueError, b.flush) def test_readonly_attributes(self): raw = self.MockRawIO() buf = self.tp(raw) x = self.MockRawIO() with self.assertRaises((AttributeError, TypeError)): buf.raw = x class SizeofTest: @support.cpython_only def test_sizeof(self): bufsize1 = 4096 bufsize2 = 8192 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize1) size = sys.getsizeof(bufio) - bufsize1 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize2) self.assertEqual(sys.getsizeof(bufio), size + bufsize2) class BufferedReaderTest(unittest.TestCase, CommonBufferedTests): read_mode = "rb" def test_constructor(self): rawio = self.MockRawIO([b"abc"]) bufio = self.tp(rawio) bufio.__init__(rawio) bufio.__init__(rawio, buffer_size=1024) bufio.__init__(rawio, buffer_size=16) self.assertEqual(b"abc", bufio.read()) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) rawio = self.MockRawIO([b"abc"]) bufio.__init__(rawio) self.assertEqual(b"abc", bufio.read()) def test_uninitialized(self): bufio = self.tp.__new__(self.tp) del bufio bufio = self.tp.__new__(self.tp) self.assertRaisesRegexp((ValueError, AttributeError), 'uninitialized|has no attribute', bufio.read, 0) bufio.__init__(self.MockRawIO()) self.assertEqual(bufio.read(0), b'') def test_read(self): for arg in (None, 7): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"abcdefg", bufio.read(arg)) # Invalid args self.assertRaises(ValueError, bufio.read, -2) def test_read1(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"a", bufio.read(1)) self.assertEqual(b"b", bufio.read1(1)) 
self.assertEqual(rawio._reads, 1) self.assertEqual(b"c", bufio.read1(100)) self.assertEqual(rawio._reads, 1) self.assertEqual(b"d", bufio.read1(100)) self.assertEqual(rawio._reads, 2) self.assertEqual(b"efg", bufio.read1(100)) self.assertEqual(rawio._reads, 3) self.assertEqual(b"", bufio.read1(100)) self.assertEqual(rawio._reads, 4) # Invalid args self.assertRaises(ValueError, bufio.read1, -1) def test_readinto(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) b = bytearray(2) self.assertEqual(bufio.readinto(b), 2) self.assertEqual(b, b"ab") self.assertEqual(bufio.readinto(b), 2) self.assertEqual(b, b"cd") self.assertEqual(bufio.readinto(b), 2) self.assertEqual(b, b"ef") self.assertEqual(bufio.readinto(b), 1) self.assertEqual(b, b"gf") self.assertEqual(bufio.readinto(b), 0) self.assertEqual(b, b"gf") def test_readlines(self): def bufio(): rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef")) return self.tp(rawio) self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"]) self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"]) self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"]) def test_buffering(self): data = b"abcdefghi" dlen = len(data) tests = [ [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ], [ 100, [ 3, 3, 3], [ dlen ] ], [ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ], ] for bufsize, buf_read_sizes, raw_read_sizes in tests: rawio = self.MockFileIO(data) bufio = self.tp(rawio, buffer_size=bufsize) pos = 0 for nbytes in buf_read_sizes: self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes]) pos += nbytes # this is mildly implementation-dependent self.assertEqual(rawio.read_history, raw_read_sizes) def test_read_non_blocking(self): # Inject some None's in there to simulate EWOULDBLOCK rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None)) bufio = self.tp(rawio) self.assertEqual(b"abcd", bufio.read(6)) self.assertEqual(b"e", bufio.read(1)) self.assertEqual(b"fg", bufio.read()) self.assertEqual(b"", bufio.peek(1)) self.assertIsNone(bufio.read()) self.assertEqual(b"", bufio.read()) rawio = self.MockRawIO((b"a", None, None)) self.assertEqual(b"a", rawio.readall()) self.assertIsNone(rawio.readall()) def test_read_past_eof(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"abcdefg", bufio.read(9000)) def test_read_all(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"abcdefg", bufio.read()) @unittest.skipUnless(threading, 'Threading required for this test.') @support.requires_resource('cpu') def test_threads(self): try: # Write out many bytes with exactly the same number of 0's, # 1's... 255's. This will help us check that concurrent reading # doesn't duplicate or forget contents. 
N = 1000 l = list(range(256)) * N random.shuffle(l) s = bytes(bytearray(l)) with self.open(support.TESTFN, "wb") as f: f.write(s) with self.open(support.TESTFN, self.read_mode, buffering=0) as raw: bufio = self.tp(raw, 8) errors = [] results = [] def f(): try: # Intra-buffer read then buffer-flushing read for n in cycle([1, 19]): s = bufio.read(n) if not s: break # list.append() is atomic results.append(s) except Exception as e: errors.append(e) raise threads = [threading.Thread(target=f) for x in range(20)] with support.start_threads(threads): time.sleep(0.02) # yield self.assertFalse(errors, "the following exceptions were caught: %r" % errors) s = b''.join(results) for i in range(256): c = bytes(bytearray([i])) self.assertEqual(s.count(c), N) finally: support.unlink(support.TESTFN) def test_misbehaved_io(self): rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertRaises(IOError, bufio.seek, 0) self.assertRaises(IOError, bufio.tell) def test_no_extraneous_read(self): # Issue #9550; when the raw IO object has satisfied the read request, # we should not issue any additional reads, otherwise it may block # (e.g. socket). bufsize = 16 for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2): rawio = self.MockRawIO([b"x" * n]) bufio = self.tp(rawio, bufsize) self.assertEqual(bufio.read(n), b"x" * n) # Simple case: one raw read is enough to satisfy the request. self.assertEqual(rawio._extraneous_reads, 0, "failed for {}: {} != 0".format(n, rawio._extraneous_reads)) # A more complex case where two raw reads are needed to satisfy # the request. rawio = self.MockRawIO([b"x" * (n - 1), b"x"]) bufio = self.tp(rawio, bufsize) self.assertEqual(bufio.read(n), b"x" * n) self.assertEqual(rawio._extraneous_reads, 0, "failed for {}: {} != 0".format(n, rawio._extraneous_reads)) class CBufferedReaderTest(BufferedReaderTest, SizeofTest): tp = io.BufferedReader def test_constructor(self): BufferedReaderTest.test_constructor(self) # The allocation can succeed on 32-bit builds, e.g. with more # than 2GB RAM and a 64-bit kernel. if sys.maxsize > 0x7FFFFFFF: rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertRaises((OverflowError, MemoryError, ValueError), bufio.__init__, rawio, sys.maxsize) def test_initialization(self): rawio = self.MockRawIO([b"abc"]) bufio = self.tp(rawio) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.read) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.read) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) self.assertRaises(ValueError, bufio.read) def test_misbehaved_io_read(self): rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) # _pyio.BufferedReader seems to implement reading different, so that # checking this is not so easy. self.assertRaises(IOError, bufio.read, 10) def test_garbage_collection(self): # C BufferedReader objects are collected. 
# The Python version has __del__, so it ends into gc.garbage instead rawio = self.FileIO(support.TESTFN, "w+b") f = self.tp(rawio) f.f = f wr = weakref.ref(f) del f support.gc_collect() self.assertTrue(wr() is None, wr) def test_args_error(self): # Issue #17275 with self.assertRaisesRegexp(TypeError, "BufferedReader"): self.tp(io.BytesIO(), 1024, 1024, 1024) class PyBufferedReaderTest(BufferedReaderTest): tp = pyio.BufferedReader class BufferedWriterTest(unittest.TestCase, CommonBufferedTests): write_mode = "wb" def test_constructor(self): rawio = self.MockRawIO() bufio = self.tp(rawio) bufio.__init__(rawio) bufio.__init__(rawio, buffer_size=1024) bufio.__init__(rawio, buffer_size=16) self.assertEqual(3, bufio.write(b"abc")) bufio.flush() self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) bufio.__init__(rawio) self.assertEqual(3, bufio.write(b"ghi")) bufio.flush() self.assertEqual(b"".join(rawio._write_stack), b"abcghi") def test_uninitialized(self): bufio = self.tp.__new__(self.tp) del bufio bufio = self.tp.__new__(self.tp) self.assertRaisesRegexp((ValueError, AttributeError), 'uninitialized|has no attribute', bufio.write, b'') bufio.__init__(self.MockRawIO()) self.assertEqual(bufio.write(b''), 0) def test_detach_flush(self): raw = self.MockRawIO() buf = self.tp(raw) buf.write(b"howdy!") self.assertFalse(raw._write_stack) buf.detach() self.assertEqual(raw._write_stack, [b"howdy!"]) def test_write(self): # Write to the buffered IO but don't overflow the buffer. writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.write(b"abc") self.assertFalse(writer._write_stack) def test_write_overflow(self): writer = self.MockRawIO() bufio = self.tp(writer, 8) contents = b"abcdefghijklmnop" for n in range(0, len(contents), 3): bufio.write(contents[n:n+3]) flushed = b"".join(writer._write_stack) # At least (total - 8) bytes were implicitly flushed, perhaps more # depending on the implementation. self.assertTrue(flushed.startswith(contents[:-8]), flushed) def check_writes(self, intermediate_func): # Lots of writes, test the flushed output is as expected. 
contents = bytes(range(256)) * 1000 n = 0 writer = self.MockRawIO() bufio = self.tp(writer, 13) # Generator of write sizes: repeat each N 15 times then proceed to N+1 def gen_sizes(): for size in count(1): for i in range(15): yield size sizes = gen_sizes() while n < len(contents): size = min(next(sizes), len(contents) - n) self.assertEqual(bufio.write(contents[n:n+size]), size) intermediate_func(bufio) n += size bufio.flush() self.assertEqual(contents, b"".join(writer._write_stack)) def test_writes(self): self.check_writes(lambda bufio: None) def test_writes_and_flushes(self): self.check_writes(lambda bufio: bufio.flush()) def test_writes_and_seeks(self): def _seekabs(bufio): pos = bufio.tell() bufio.seek(pos + 1, 0) bufio.seek(pos - 1, 0) bufio.seek(pos, 0) self.check_writes(_seekabs) def _seekrel(bufio): pos = bufio.seek(0, 1) bufio.seek(+1, 1) bufio.seek(-1, 1) bufio.seek(pos, 0) self.check_writes(_seekrel) def test_writes_and_truncates(self): self.check_writes(lambda bufio: bufio.truncate(bufio.tell())) def test_write_non_blocking(self): raw = self.MockNonBlockWriterIO() bufio = self.tp(raw, 8) self.assertEqual(bufio.write(b"abcd"), 4) self.assertEqual(bufio.write(b"efghi"), 5) # 1 byte will be written, the rest will be buffered raw.block_on(b"k") self.assertEqual(bufio.write(b"jklmn"), 5) # 8 bytes will be written, 8 will be buffered and the rest will be lost raw.block_on(b"0") try: bufio.write(b"opqrwxyz0123456789") except self.BlockingIOError as e: written = e.characters_written else: self.fail("BlockingIOError should have been raised") self.assertEqual(written, 16) self.assertEqual(raw.pop_written(), b"abcdefghijklmnopqrwxyz") self.assertEqual(bufio.write(b"ABCDEFGHI"), 9) s = raw.pop_written() # Previously buffered bytes were flushed self.assertTrue(s.startswith(b"01234567A"), s) def test_write_and_rewind(self): raw = io.BytesIO() bufio = self.tp(raw, 4) self.assertEqual(bufio.write(b"abcdef"), 6) self.assertEqual(bufio.tell(), 6) bufio.seek(0, 0) self.assertEqual(bufio.write(b"XY"), 2) bufio.seek(6, 0) self.assertEqual(raw.getvalue(), b"XYcdef") self.assertEqual(bufio.write(b"123456"), 6) bufio.flush() self.assertEqual(raw.getvalue(), b"XYcdef123456") def test_flush(self): writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.write(b"abc") bufio.flush() self.assertEqual(b"abc", writer._write_stack[0]) def test_writelines(self): l = [b'ab', b'cd', b'ef'] writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.writelines(l) bufio.flush() self.assertEqual(b''.join(writer._write_stack), b'abcdef') def test_writelines_userlist(self): l = UserList([b'ab', b'cd', b'ef']) writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.writelines(l) bufio.flush() self.assertEqual(b''.join(writer._write_stack), b'abcdef') def test_writelines_error(self): writer = self.MockRawIO() bufio = self.tp(writer, 8) self.assertRaises(TypeError, bufio.writelines, [1, 2, 3]) self.assertRaises(TypeError, bufio.writelines, None) def test_destructor(self): writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.write(b"abc") del bufio support.gc_collect() self.assertEqual(b"abc", writer._write_stack[0]) def test_truncate(self): # Truncate implicitly flushes the buffer. 
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw: bufio = self.tp(raw, 8) bufio.write(b"abcdef") self.assertEqual(bufio.truncate(3), 3) self.assertEqual(bufio.tell(), 6) with self.open(support.TESTFN, "rb", buffering=0) as f: self.assertEqual(f.read(), b"abc") @unittest.skipUnless(threading, 'Threading required for this test.') @support.requires_resource('cpu') def test_threads(self): try: # Write out many bytes from many threads and test they were # all flushed. N = 1000 contents = bytes(range(256)) * N sizes = cycle([1, 19]) n = 0 queue = deque() while n < len(contents): size = next(sizes) queue.append(contents[n:n+size]) n += size del contents # We use a real file object because it allows us to # exercise situations where the GIL is released before # writing the buffer to the raw streams. This is in addition # to concurrency issues due to switching threads in the middle # of Python code. with self.open(support.TESTFN, self.write_mode, buffering=0) as raw: bufio = self.tp(raw, 8) errors = [] def f(): try: while True: try: s = queue.popleft() except IndexError: return bufio.write(s) except Exception as e: errors.append(e) raise threads = [threading.Thread(target=f) for x in range(20)] with support.start_threads(threads): time.sleep(0.02) # yield self.assertFalse(errors, "the following exceptions were caught: %r" % errors) bufio.close() with self.open(support.TESTFN, "rb") as f: s = f.read() for i in range(256): self.assertEqual(s.count(bytes([i])), N) finally: support.unlink(support.TESTFN) def test_misbehaved_io(self): rawio = self.MisbehavedRawIO() bufio = self.tp(rawio, 5)<|fim▁hole|> self.assertRaises(IOError, bufio.seek, 0) self.assertRaises(IOError, bufio.tell) self.assertRaises(IOError, bufio.write, b"abcdef") def test_max_buffer_size_deprecation(self): with support.check_warnings(("max_buffer_size is deprecated", DeprecationWarning)): self.tp(self.MockRawIO(), 8, 12) def test_write_error_on_close(self): raw = self.MockRawIO() def bad_write(b): raise IOError() raw.write = bad_write b = self.tp(raw) b.write(b'spam') self.assertRaises(IOError, b.close) # exception not swallowed self.assertTrue(b.closed) class CBufferedWriterTest(BufferedWriterTest, SizeofTest): tp = io.BufferedWriter def test_constructor(self): BufferedWriterTest.test_constructor(self) # The allocation can succeed on 32-bit builds, e.g. with more # than 2GB RAM and a 64-bit kernel. if sys.maxsize > 0x7FFFFFFF: rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertRaises((OverflowError, MemoryError, ValueError), bufio.__init__, rawio, sys.maxsize) def test_initialization(self): rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.write, b"def") self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.write, b"def") self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) self.assertRaises(ValueError, bufio.write, b"def") def test_garbage_collection(self): # C BufferedWriter objects are collected, and collecting them flushes # all data to disk. 
# The Python version has __del__, so it ends into gc.garbage instead rawio = self.FileIO(support.TESTFN, "w+b") f = self.tp(rawio) f.write(b"123xxx") f.x = f wr = weakref.ref(f) del f support.gc_collect() self.assertTrue(wr() is None, wr) with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"123xxx") def test_args_error(self): # Issue #17275 with self.assertRaisesRegexp(TypeError, "BufferedWriter"): self.tp(io.BytesIO(), 1024, 1024, 1024) class PyBufferedWriterTest(BufferedWriterTest): tp = pyio.BufferedWriter class BufferedRWPairTest(unittest.TestCase): def test_constructor(self): pair = self.tp(self.MockRawIO(), self.MockRawIO()) self.assertFalse(pair.closed) def test_uninitialized(self): pair = self.tp.__new__(self.tp) del pair pair = self.tp.__new__(self.tp) self.assertRaisesRegexp((ValueError, AttributeError), 'uninitialized|has no attribute', pair.read, 0) self.assertRaisesRegexp((ValueError, AttributeError), 'uninitialized|has no attribute', pair.write, b'') pair.__init__(self.MockRawIO(), self.MockRawIO()) self.assertEqual(pair.read(0), b'') self.assertEqual(pair.write(b''), 0) def test_detach(self): pair = self.tp(self.MockRawIO(), self.MockRawIO()) self.assertRaises(self.UnsupportedOperation, pair.detach) def test_constructor_max_buffer_size_deprecation(self): with support.check_warnings(("max_buffer_size is deprecated", DeprecationWarning)): self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12) def test_constructor_with_not_readable(self): class NotReadable(MockRawIO): def readable(self): return False self.assertRaises(IOError, self.tp, NotReadable(), self.MockRawIO()) def test_constructor_with_not_writeable(self): class NotWriteable(MockRawIO): def writable(self): return False self.assertRaises(IOError, self.tp, self.MockRawIO(), NotWriteable()) def test_read(self): pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO()) self.assertEqual(pair.read(3), b"abc") self.assertEqual(pair.read(1), b"d") self.assertEqual(pair.read(), b"ef") pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO()) self.assertEqual(pair.read(None), b"abc") def test_readlines(self): pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO()) self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"]) self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"]) self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"]) def test_read1(self): # .read1() is delegated to the underlying reader object, so this test # can be shallow. pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO()) self.assertEqual(pair.read1(3), b"abc") def test_readinto(self): pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO()) data = bytearray(5) self.assertEqual(pair.readinto(data), 5) self.assertEqual(data, b"abcde") def test_write(self): w = self.MockRawIO() pair = self.tp(self.MockRawIO(), w) pair.write(b"abc") pair.flush() pair.write(b"def") pair.flush() self.assertEqual(w._write_stack, [b"abc", b"def"]) def test_peek(self): pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO()) self.assertTrue(pair.peek(3).startswith(b"abc")) self.assertEqual(pair.read(3), b"abc") def test_readable(self): pair = self.tp(self.MockRawIO(), self.MockRawIO()) self.assertTrue(pair.readable()) def test_writeable(self): pair = self.tp(self.MockRawIO(), self.MockRawIO()) self.assertTrue(pair.writable()) def test_seekable(self): # BufferedRWPairs are never seekable, even if their readers and writers # are. 
pair = self.tp(self.MockRawIO(), self.MockRawIO()) self.assertFalse(pair.seekable()) # .flush() is delegated to the underlying writer object and has been # tested in the test_write method. def test_close_and_closed(self): pair = self.tp(self.MockRawIO(), self.MockRawIO()) self.assertFalse(pair.closed) pair.close() self.assertTrue(pair.closed) def test_reader_close_error_on_close(self): def reader_close(): reader_non_existing reader = self.MockRawIO() reader.close = reader_close writer = self.MockRawIO() pair = self.tp(reader, writer) with self.assertRaises(NameError) as err: pair.close() self.assertIn('reader_non_existing', str(err.exception)) self.assertTrue(pair.closed) self.assertFalse(reader.closed) self.assertTrue(writer.closed) def test_writer_close_error_on_close(self): def writer_close(): writer_non_existing reader = self.MockRawIO() writer = self.MockRawIO() writer.close = writer_close pair = self.tp(reader, writer) with self.assertRaises(NameError) as err: pair.close() self.assertIn('writer_non_existing', str(err.exception)) self.assertFalse(pair.closed) self.assertTrue(reader.closed) self.assertFalse(writer.closed) def test_reader_writer_close_error_on_close(self): def reader_close(): reader_non_existing def writer_close(): writer_non_existing reader = self.MockRawIO() reader.close = reader_close writer = self.MockRawIO() writer.close = writer_close pair = self.tp(reader, writer) with self.assertRaises(NameError) as err: pair.close() self.assertIn('reader_non_existing', str(err.exception)) self.assertFalse(pair.closed) self.assertFalse(reader.closed) self.assertFalse(writer.closed) def test_isatty(self): class SelectableIsAtty(MockRawIO): def __init__(self, isatty): MockRawIO.__init__(self) self._isatty = isatty def isatty(self): return self._isatty pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False)) self.assertFalse(pair.isatty()) pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False)) self.assertTrue(pair.isatty()) pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True)) self.assertTrue(pair.isatty()) pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True)) self.assertTrue(pair.isatty()) def test_weakref_clearing(self): brw = self.tp(self.MockRawIO(), self.MockRawIO()) ref = weakref.ref(brw) brw = None ref = None # Shouldn't segfault. 
class CBufferedRWPairTest(BufferedRWPairTest): tp = io.BufferedRWPair class PyBufferedRWPairTest(BufferedRWPairTest): tp = pyio.BufferedRWPair class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest): read_mode = "rb+" write_mode = "wb+" def test_constructor(self): BufferedReaderTest.test_constructor(self) BufferedWriterTest.test_constructor(self) def test_uninitialized(self): BufferedReaderTest.test_uninitialized(self) BufferedWriterTest.test_uninitialized(self) def test_read_and_write(self): raw = self.MockRawIO((b"asdf", b"ghjk")) rw = self.tp(raw, 8) self.assertEqual(b"as", rw.read(2)) rw.write(b"ddd") rw.write(b"eee") self.assertFalse(raw._write_stack) # Buffer writes self.assertEqual(b"ghjk", rw.read()) self.assertEqual(b"dddeee", raw._write_stack[0]) def test_seek_and_tell(self): raw = self.BytesIO(b"asdfghjkl") rw = self.tp(raw) self.assertEqual(b"as", rw.read(2)) self.assertEqual(2, rw.tell()) rw.seek(0, 0) self.assertEqual(b"asdf", rw.read(4)) rw.write(b"123f") rw.seek(0, 0) self.assertEqual(b"asdf123fl", rw.read()) self.assertEqual(9, rw.tell()) rw.seek(-4, 2) self.assertEqual(5, rw.tell()) rw.seek(2, 1) self.assertEqual(7, rw.tell()) self.assertEqual(b"fl", rw.read(11)) rw.flush() self.assertEqual(b"asdf123fl", raw.getvalue()) self.assertRaises(TypeError, rw.seek, 0.0) def check_flush_and_read(self, read_func): raw = self.BytesIO(b"abcdefghi") bufio = self.tp(raw) self.assertEqual(b"ab", read_func(bufio, 2)) bufio.write(b"12") self.assertEqual(b"ef", read_func(bufio, 2)) self.assertEqual(6, bufio.tell()) bufio.flush() self.assertEqual(6, bufio.tell()) self.assertEqual(b"ghi", read_func(bufio)) raw.seek(0, 0) raw.write(b"XYZ") # flush() resets the read buffer bufio.flush() bufio.seek(0, 0) self.assertEqual(b"XYZ", read_func(bufio, 3)) def test_flush_and_read(self): self.check_flush_and_read(lambda bufio, *args: bufio.read(*args)) def test_flush_and_readinto(self): def _readinto(bufio, n=-1): b = bytearray(n if n >= 0 else 9999) n = bufio.readinto(b) return bytes(b[:n]) self.check_flush_and_read(_readinto) def test_flush_and_peek(self): def _peek(bufio, n=-1): # This relies on the fact that the buffer can contain the whole # raw stream, otherwise peek() can return less. b = bufio.peek(n) if n != -1: b = b[:n] bufio.seek(len(b), 1) return b self.check_flush_and_read(_peek) def test_flush_and_write(self): raw = self.BytesIO(b"abcdefghi") bufio = self.tp(raw) bufio.write(b"123") bufio.flush() bufio.write(b"45") bufio.flush() bufio.seek(0, 0) self.assertEqual(b"12345fghi", raw.getvalue()) self.assertEqual(b"12345fghi", bufio.read()) def test_threads(self): BufferedReaderTest.test_threads(self) BufferedWriterTest.test_threads(self) def test_writes_and_peek(self): def _peek(bufio): bufio.peek(1) self.check_writes(_peek) def _peek(bufio): pos = bufio.tell() bufio.seek(-1, 1) bufio.peek(1) bufio.seek(pos, 0) self.check_writes(_peek) def test_writes_and_reads(self): def _read(bufio): bufio.seek(-1, 1) bufio.read(1) self.check_writes(_read) def test_writes_and_read1s(self): def _read1(bufio): bufio.seek(-1, 1) bufio.read1(1) self.check_writes(_read1) def test_writes_and_readintos(self): def _read(bufio): bufio.seek(-1, 1) bufio.readinto(bytearray(1)) self.check_writes(_read) def test_write_after_readahead(self): # Issue #6629: writing after the buffer was filled by readahead should # first rewind the raw stream. 
for overwrite_size in [1, 5]: raw = self.BytesIO(b"A" * 10) bufio = self.tp(raw, 4) # Trigger readahead self.assertEqual(bufio.read(1), b"A") self.assertEqual(bufio.tell(), 1) # Overwriting should rewind the raw stream if it needs so bufio.write(b"B" * overwrite_size) self.assertEqual(bufio.tell(), overwrite_size + 1) # If the write size was smaller than the buffer size, flush() and # check that rewind happens. bufio.flush() self.assertEqual(bufio.tell(), overwrite_size + 1) s = raw.getvalue() self.assertEqual(s, b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size)) def test_write_rewind_write(self): # Various combinations of reading / writing / seeking backwards / writing again def mutate(bufio, pos1, pos2): assert pos2 >= pos1 # Fill the buffer bufio.seek(pos1) bufio.read(pos2 - pos1) bufio.write(b'\x02') # This writes earlier than the previous write, but still inside # the buffer. bufio.seek(pos1) bufio.write(b'\x01') b = b"\x80\x81\x82\x83\x84" for i in range(0, len(b)): for j in range(i, len(b)): raw = self.BytesIO(b) bufio = self.tp(raw, 100) mutate(bufio, i, j) bufio.flush() expected = bytearray(b) expected[j] = 2 expected[i] = 1 self.assertEqual(raw.getvalue(), expected, "failed result for i=%d, j=%d" % (i, j)) def test_truncate_after_read_or_write(self): raw = self.BytesIO(b"A" * 10) bufio = self.tp(raw, 100) self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled self.assertEqual(bufio.truncate(), 2) self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases self.assertEqual(bufio.truncate(), 4) def test_misbehaved_io(self): BufferedReaderTest.test_misbehaved_io(self) BufferedWriterTest.test_misbehaved_io(self) def test_interleaved_read_write(self): # Test for issue #12213 with self.BytesIO(b'abcdefgh') as raw: with self.tp(raw, 100) as f: f.write(b"1") self.assertEqual(f.read(1), b'b') f.write(b'2') self.assertEqual(f.read1(1), b'd') f.write(b'3') buf = bytearray(1) f.readinto(buf) self.assertEqual(buf, b'f') f.write(b'4') self.assertEqual(f.peek(1), b'h') f.flush() self.assertEqual(raw.getvalue(), b'1b2d3f4h') with self.BytesIO(b'abc') as raw: with self.tp(raw, 100) as f: self.assertEqual(f.read(1), b'a') f.write(b"2") self.assertEqual(f.read(1), b'c') f.flush() self.assertEqual(raw.getvalue(), b'a2c') def test_interleaved_readline_write(self): with self.BytesIO(b'ab\ncdef\ng\n') as raw: with self.tp(raw) as f: f.write(b'1') self.assertEqual(f.readline(), b'b\n') f.write(b'2') self.assertEqual(f.readline(), b'def\n') f.write(b'3') self.assertEqual(f.readline(), b'\n') f.flush() self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n') class CBufferedRandomTest(CBufferedReaderTest, CBufferedWriterTest, BufferedRandomTest, SizeofTest): tp = io.BufferedRandom def test_constructor(self): BufferedRandomTest.test_constructor(self) # The allocation can succeed on 32-bit builds, e.g. with more # than 2GB RAM and a 64-bit kernel. 
if sys.maxsize > 0x7FFFFFFF: rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertRaises((OverflowError, MemoryError, ValueError), bufio.__init__, rawio, sys.maxsize) def test_garbage_collection(self): CBufferedReaderTest.test_garbage_collection(self) CBufferedWriterTest.test_garbage_collection(self) def test_args_error(self): # Issue #17275 with self.assertRaisesRegexp(TypeError, "BufferedRandom"): self.tp(io.BytesIO(), 1024, 1024, 1024) class PyBufferedRandomTest(BufferedRandomTest): tp = pyio.BufferedRandom # To fully exercise seek/tell, the StatefulIncrementalDecoder has these # properties: # - A single output character can correspond to many bytes of input. # - The number of input bytes to complete the character can be # undetermined until the last input byte is received. # - The number of input bytes can vary depending on previous input. # - A single input byte can correspond to many characters of output. # - The number of output characters can be undetermined until the # last input byte is received. # - The number of output characters can vary depending on previous input. class StatefulIncrementalDecoder(codecs.IncrementalDecoder): """ For testing seek/tell behavior with a stateful, buffering decoder. Input is a sequence of words. Words may be fixed-length (length set by input) or variable-length (period-terminated). In variable-length mode, extra periods are ignored. Possible words are: - 'i' followed by a number sets the input length, I (maximum 99). When I is set to 0, words are space-terminated. - 'o' followed by a number sets the output length, O (maximum 99). - Any other word is converted into a word followed by a period on the output. The output word consists of the input word truncated or padded out with hyphens to make its length equal to O. If O is 0, the word is output verbatim without truncating or padding. I and O are initially set to 1. When I changes, any buffered input is re-scanned according to the new I. EOF also terminates the last word. """ def __init__(self, errors='strict'): codecs.IncrementalDecoder.__init__(self, errors) self.reset() def __repr__(self): return '<SID %x>' % id(self) def reset(self): self.i = 1 self.o = 1 self.buffer = bytearray() def getstate(self): i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset() return bytes(self.buffer), i*100 + o def setstate(self, state): buffer, io = state self.buffer = bytearray(buffer) i, o = divmod(io, 100) self.i, self.o = i ^ 1, o ^ 1 def decode(self, input, final=False): output = '' for b in input: if self.i == 0: # variable-length, terminated with period if b == '.': if self.buffer: output += self.process_word() else: self.buffer.append(b) else: # fixed-length, terminate after self.i bytes self.buffer.append(b) if len(self.buffer) == self.i: output += self.process_word() if final and self.buffer: # EOF terminates the last word output += self.process_word() return output def process_word(self): output = '' if self.buffer[0] == ord('i'): self.i = min(99, int(self.buffer[1:] or 0)) # set input length elif self.buffer[0] == ord('o'): self.o = min(99, int(self.buffer[1:] or 0)) # set output length else: output = self.buffer.decode('ascii') if len(output) < self.o: output += '-'*self.o # pad out with hyphens if self.o: output = output[:self.o] # truncate to output length output += '.' 
self.buffer = bytearray() return output codecEnabled = False @classmethod def lookupTestDecoder(cls, name): if cls.codecEnabled and name == 'test_decoder': latin1 = codecs.lookup('latin-1') return codecs.CodecInfo( name='test_decoder', encode=latin1.encode, decode=None, incrementalencoder=None, streamreader=None, streamwriter=None, incrementaldecoder=cls) # Register the previous decoder for testing. # Disabled by default, tests will enable it. codecs.register(StatefulIncrementalDecoder.lookupTestDecoder) class StatefulIncrementalDecoderTest(unittest.TestCase): """ Make sure the StatefulIncrementalDecoder actually works. """ test_cases = [ # I=1, O=1 (fixed-length input == fixed-length output) (b'abcd', False, 'a.b.c.d.'), # I=0, O=0 (variable-length input, variable-length output) (b'oiabcd', True, 'abcd.'), # I=0, O=0 (should ignore extra periods) (b'oi...abcd...', True, 'abcd.'), # I=0, O=6 (variable-length input, fixed-length output) (b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'), # I=2, O=6 (fixed-length input < fixed-length output) (b'i.i2.o6xyz', True, 'xy----.z-----.'), # I=6, O=3 (fixed-length input > fixed-length output) (b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'), # I=0, then 3; O=29, then 15 (with longer output) (b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True, 'a----------------------------.' + 'b----------------------------.' + 'cde--------------------------.' + 'abcdefghijabcde.' + 'a.b------------.' + '.c.------------.' + 'd.e------------.' + 'k--------------.' + 'l--------------.' + 'm--------------.') ] def test_decoder(self): # Try a few one-shot test cases. for input, eof, output in self.test_cases: d = StatefulIncrementalDecoder() self.assertEqual(d.decode(input, eof), output) # Also test an unfinished decode, followed by forcing EOF. 
d = StatefulIncrementalDecoder() self.assertEqual(d.decode(b'oiabcd'), '') self.assertEqual(d.decode(b'', 1), 'abcd.') class TextIOWrapperTest(unittest.TestCase): def setUp(self): self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n" self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii") support.unlink(support.TESTFN) def tearDown(self): support.unlink(support.TESTFN) def test_constructor(self): r = self.BytesIO(b"\xc3\xa9\n\n") b = self.BufferedReader(r, 1000) t = self.TextIOWrapper(b) t.__init__(b, encoding="latin1", newline="\r\n") self.assertEqual(t.encoding, "latin1") self.assertEqual(t.line_buffering, False) t.__init__(b, encoding="utf8", line_buffering=True) self.assertEqual(t.encoding, "utf8") self.assertEqual(t.line_buffering, True) self.assertEqual("\xe9\n", t.readline()) self.assertRaises(TypeError, t.__init__, b, newline=42) self.assertRaises(ValueError, t.__init__, b, newline='xyzzy') def test_uninitialized(self): t = self.TextIOWrapper.__new__(self.TextIOWrapper) del t t = self.TextIOWrapper.__new__(self.TextIOWrapper) self.assertRaises(Exception, repr, t) self.assertRaisesRegexp((ValueError, AttributeError), 'uninitialized|has no attribute', t.read, 0) t.__init__(self.MockRawIO()) self.assertEqual(t.read(0), u'') def test_detach(self): r = self.BytesIO() b = self.BufferedWriter(r) t = self.TextIOWrapper(b) self.assertIs(t.detach(), b) t = self.TextIOWrapper(b, encoding="ascii") t.write("howdy") self.assertFalse(r.getvalue()) t.detach() self.assertEqual(r.getvalue(), b"howdy") self.assertRaises(ValueError, t.detach) # Operations independent of the detached stream should still work repr(t) self.assertEqual(t.encoding, "ascii") self.assertEqual(t.errors, "strict") self.assertFalse(t.line_buffering) def test_repr(self): raw = self.BytesIO("hello".encode("utf-8")) b = self.BufferedReader(raw) t = self.TextIOWrapper(b, encoding="utf-8") modname = self.TextIOWrapper.__module__ self.assertEqual(repr(t), "<%s.TextIOWrapper encoding='utf-8'>" % modname) raw.name = "dummy" self.assertEqual(repr(t), "<%s.TextIOWrapper name=u'dummy' encoding='utf-8'>" % modname) raw.name = b"dummy" self.assertEqual(repr(t), "<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname) t.buffer.detach() repr(t) # Should not raise an exception def test_line_buffering(self): r = self.BytesIO() b = self.BufferedWriter(r, 1000) t = self.TextIOWrapper(b, newline="\n", line_buffering=True) t.write("X") self.assertEqual(r.getvalue(), b"") # No flush happened t.write("Y\nZ") self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed t.write("A\rB") self.assertEqual(r.getvalue(), b"XY\nZA\rB") def test_encoding(self): # Check the encoding attribute is always set, and valid b = self.BytesIO() t = self.TextIOWrapper(b, encoding="utf8") self.assertEqual(t.encoding, "utf8") t = self.TextIOWrapper(b) self.assertTrue(t.encoding is not None) codecs.lookup(t.encoding) def test_encoding_errors_reading(self): # (1) default b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii") self.assertRaises(UnicodeError, t.read) # (2) explicit strict b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="strict") self.assertRaises(UnicodeError, t.read) # (3) ignore b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="ignore") self.assertEqual(t.read(), "abc\n\n") # (4) replace b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="replace") self.assertEqual(t.read(), "abc\n\ufffd\n") def 
test_encoding_errors_writing(self): # (1) default b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii") self.assertRaises(UnicodeError, t.write, "\xff") # (2) explicit strict b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="strict") self.assertRaises(UnicodeError, t.write, "\xff") # (3) ignore b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="ignore", newline="\n") t.write("abc\xffdef\n") t.flush() self.assertEqual(b.getvalue(), b"abcdef\n") # (4) replace b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="replace", newline="\n") t.write("abc\xffdef\n") t.flush() self.assertEqual(b.getvalue(), b"abc?def\n") def test_newlines(self): input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ] tests = [ [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ], [ '', input_lines ], [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ], [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ], [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ], ] encodings = ( 'utf-8', 'latin-1', 'utf-16', 'utf-16-le', 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be', ) # Try a range of buffer sizes to test the case where \r is the last # character in TextIOWrapper._pending_line. for encoding in encodings: # XXX: str.encode() should return bytes data = bytes(''.join(input_lines).encode(encoding)) for do_reads in (False, True): for bufsize in range(1, 10): for newline, exp_lines in tests: bufio = self.BufferedReader(self.BytesIO(data), bufsize) textio = self.TextIOWrapper(bufio, newline=newline, encoding=encoding) if do_reads: got_lines = [] while True: c2 = textio.read(2) if c2 == '': break self.assertEqual(len(c2), 2) got_lines.append(c2 + textio.readline()) else: got_lines = list(textio) for got_line, exp_line in zip(got_lines, exp_lines): self.assertEqual(got_line, exp_line) self.assertEqual(len(got_lines), len(exp_lines)) def test_newlines_input(self): testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG" normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n") for newline, expected in [ (None, normalized.decode("ascii").splitlines(True)), ("", testdata.decode("ascii").splitlines(True)), ("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]), ]: buf = self.BytesIO(testdata) txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline) self.assertEqual(txt.readlines(), expected) txt.seek(0) self.assertEqual(txt.read(), "".join(expected)) def test_newlines_output(self): testdict = { "": b"AAA\nBBB\nCCC\nX\rY\r\nZ", "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ", "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ", "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ", } tests = [(None, testdict[os.linesep])] + sorted(testdict.items()) for newline, expected in tests: buf = self.BytesIO() txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline) txt.write("AAA\nB") txt.write("BB\nCCC\n") txt.write("X\rY\r\nZ") txt.flush() self.assertEqual(buf.closed, False) self.assertEqual(buf.getvalue(), expected) def test_destructor(self): l = [] base = self.BytesIO class MyBytesIO(base): def close(self): l.append(self.getvalue()) base.close(self) b = MyBytesIO() t = self.TextIOWrapper(b, encoding="ascii") t.write("abc") del t support.gc_collect() self.assertEqual([b"abc"], l) def test_override_destructor(self): record = [] class MyTextIO(self.TextIOWrapper): def __del__(self): record.append(1) try: 
f = super(MyTextIO, self).__del__ except AttributeError: pass else: f() def close(self): record.append(2) super(MyTextIO, self).close() def flush(self): record.append(3) super(MyTextIO, self).flush() b = self.BytesIO() t = MyTextIO(b, encoding="ascii") del t support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_error_through_destructor(self): # Test that the exception state is not modified by a destructor, # even if close() fails. rawio = self.CloseFailureIO() def f(): self.TextIOWrapper(rawio).xyzzy with support.captured_output("stderr") as s: self.assertRaises(AttributeError, f) s = s.getvalue().strip() if s: # The destructor *may* have printed an unraisable error, check it self.assertEqual(len(s.splitlines()), 1) self.assertTrue(s.startswith("Exception IOError: "), s) self.assertTrue(s.endswith(" ignored"), s) # Systematic tests of the text I/O API def test_basic_io(self): for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65): for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le": f = self.open(support.TESTFN, "w+", encoding=enc) f._CHUNK_SIZE = chunksize self.assertEqual(f.write("abc"), 3) f.close() f = self.open(support.TESTFN, "r+", encoding=enc) f._CHUNK_SIZE = chunksize self.assertEqual(f.tell(), 0) self.assertEqual(f.read(), "abc") cookie = f.tell() self.assertEqual(f.seek(0), 0) self.assertEqual(f.read(None), "abc") f.seek(0) self.assertEqual(f.read(2), "ab") self.assertEqual(f.read(1), "c") self.assertEqual(f.read(1), "") self.assertEqual(f.read(), "") self.assertEqual(f.tell(), cookie) self.assertEqual(f.seek(0), 0) self.assertEqual(f.seek(0, 2), cookie) self.assertEqual(f.write("def"), 3) self.assertEqual(f.seek(cookie), cookie) self.assertEqual(f.read(), "def") if enc.startswith("utf"): self.multi_line_test(f, enc) f.close() def multi_line_test(self, f, enc): f.seek(0) f.truncate() sample = "s\xff\u0fff\uffff" wlines = [] for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000): chars = [] for i in range(size): chars.append(sample[i % len(sample)]) line = "".join(chars) + "\n" wlines.append((f.tell(), line)) f.write(line) f.seek(0) rlines = [] while True: pos = f.tell() line = f.readline() if not line: break rlines.append((pos, line)) self.assertEqual(rlines, wlines) def test_telling(self): f = self.open(support.TESTFN, "w+", encoding="utf8") p0 = f.tell() f.write("\xff\n") p1 = f.tell() f.write("\xff\n") p2 = f.tell() f.seek(0) self.assertEqual(f.tell(), p0) self.assertEqual(f.readline(), "\xff\n") self.assertEqual(f.tell(), p1) self.assertEqual(f.readline(), "\xff\n") self.assertEqual(f.tell(), p2) f.seek(0) for line in f: self.assertEqual(line, "\xff\n") self.assertRaises(IOError, f.tell) self.assertEqual(f.tell(), p2) f.close() def test_seeking(self): chunk_size = _default_chunk_size() prefix_size = chunk_size - 2 u_prefix = "a" * prefix_size prefix = bytes(u_prefix.encode("utf-8")) self.assertEqual(len(u_prefix), len(prefix)) u_suffix = "\u8888\n" suffix = bytes(u_suffix.encode("utf-8")) line = prefix + suffix f = self.open(support.TESTFN, "wb") f.write(line*2) f.close() f = self.open(support.TESTFN, "r", encoding="utf-8") s = f.read(prefix_size) self.assertEqual(s, prefix.decode("ascii")) self.assertEqual(f.tell(), prefix_size) self.assertEqual(f.readline(), u_suffix) def test_seeking_too(self): # Regression test for a specific bug data = b'\xe0\xbf\xbf\n' f = self.open(support.TESTFN, "wb") f.write(data) f.close() f = self.open(support.TESTFN, "r", encoding="utf-8") f._CHUNK_SIZE # Just test that it exists 
f._CHUNK_SIZE = 2 f.readline() f.tell() def test_seek_and_tell(self): #Test seek/tell using the StatefulIncrementalDecoder. # Make test faster by doing smaller seeks CHUNK_SIZE = 128 def test_seek_and_tell_with_data(data, min_pos=0): """Tell/seek to various points within a data stream and ensure that the decoded data returned by read() is consistent.""" f = self.open(support.TESTFN, 'wb') f.write(data) f.close() f = self.open(support.TESTFN, encoding='test_decoder') f._CHUNK_SIZE = CHUNK_SIZE decoded = f.read() f.close() for i in range(min_pos, len(decoded) + 1): # seek positions for j in [1, 5, len(decoded) - i]: # read lengths f = self.open(support.TESTFN, encoding='test_decoder') self.assertEqual(f.read(i), decoded[:i]) cookie = f.tell() self.assertEqual(f.read(j), decoded[i:i + j]) f.seek(cookie) self.assertEqual(f.read(), decoded[i:]) f.close() # Enable the test decoder. StatefulIncrementalDecoder.codecEnabled = 1 # Run the tests. try: # Try each test case. for input, _, _ in StatefulIncrementalDecoderTest.test_cases: test_seek_and_tell_with_data(input) # Position each test case so that it crosses a chunk boundary. for input, _, _ in StatefulIncrementalDecoderTest.test_cases: offset = CHUNK_SIZE - len(input)//2 prefix = b'.'*offset # Don't bother seeking into the prefix (takes too long). min_pos = offset*2 test_seek_and_tell_with_data(prefix + input, min_pos) # Ensure our test decoder won't interfere with subsequent tests. finally: StatefulIncrementalDecoder.codecEnabled = 0 def test_encoded_writes(self): data = "1234567890" tests = ("utf-16", "utf-16-le", "utf-16-be", "utf-32", "utf-32-le", "utf-32-be") for encoding in tests: buf = self.BytesIO() f = self.TextIOWrapper(buf, encoding=encoding) # Check if the BOM is written only once (see issue1753). f.write(data) f.write(data) f.seek(0) self.assertEqual(f.read(), data * 2) f.seek(0) self.assertEqual(f.read(), data * 2) self.assertEqual(buf.getvalue(), (data * 2).encode(encoding)) def test_unreadable(self): class UnReadable(self.BytesIO): def readable(self): return False txt = self.TextIOWrapper(UnReadable()) self.assertRaises(IOError, txt.read) def test_read_one_by_one(self): txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB")) reads = "" while True: c = txt.read(1) if not c: break reads += c self.assertEqual(reads, "AA\nBB") def test_readlines(self): txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC")) self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"]) txt.seek(0) self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"]) txt.seek(0) self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"]) # read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128. def test_read_by_chunk(self): # make sure "\r\n" straddles 128 char boundary. 
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB")) reads = "" while True: c = txt.read(128) if not c: break reads += c self.assertEqual(reads, "A"*127+"\nB") def test_writelines(self): l = ['ab', 'cd', 'ef'] buf = self.BytesIO() txt = self.TextIOWrapper(buf) txt.writelines(l) txt.flush() self.assertEqual(buf.getvalue(), b'abcdef') def test_writelines_userlist(self): l = UserList(['ab', 'cd', 'ef']) buf = self.BytesIO() txt = self.TextIOWrapper(buf) txt.writelines(l) txt.flush() self.assertEqual(buf.getvalue(), b'abcdef') def test_writelines_error(self): txt = self.TextIOWrapper(self.BytesIO()) self.assertRaises(TypeError, txt.writelines, [1, 2, 3]) self.assertRaises(TypeError, txt.writelines, None) self.assertRaises(TypeError, txt.writelines, b'abc') def test_issue1395_1(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") # read one char at a time reads = "" while True: c = txt.read(1) if not c: break reads += c self.assertEqual(reads, self.normalized) def test_issue1395_2(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = "" while True: c = txt.read(4) if not c: break reads += c self.assertEqual(reads, self.normalized) def test_issue1395_3(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = txt.read(4) reads += txt.read(4) reads += txt.readline() reads += txt.readline() reads += txt.readline() self.assertEqual(reads, self.normalized) def test_issue1395_4(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = txt.read(4) reads += txt.read() self.assertEqual(reads, self.normalized) def test_issue1395_5(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = txt.read(4) pos = txt.tell() txt.seek(0) txt.seek(pos) self.assertEqual(txt.read(4), "BBB\n") def test_issue2282(self): buffer = self.BytesIO(self.testdata) txt = self.TextIOWrapper(buffer, encoding="ascii") self.assertEqual(buffer.seekable(), txt.seekable()) def test_append_bom(self): # The BOM is not written again when appending to a non-empty file filename = support.TESTFN for charset in ('utf-8-sig', 'utf-16', 'utf-32'): with self.open(filename, 'w', encoding=charset) as f: f.write('aaa') pos = f.tell() with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'aaa'.encode(charset)) with self.open(filename, 'a', encoding=charset) as f: f.write('xxx') with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'aaaxxx'.encode(charset)) def test_seek_bom(self): # Same test, but when seeking manually filename = support.TESTFN for charset in ('utf-8-sig', 'utf-16', 'utf-32'): with self.open(filename, 'w', encoding=charset) as f: f.write('aaa') pos = f.tell() with self.open(filename, 'r+', encoding=charset) as f: f.seek(pos) f.write('zzz') f.seek(0) f.write('bbb') with self.open(filename, 'rb') as f: self.assertEqual(f.read(), 'bbbzzz'.encode(charset)) def test_errors_property(self): with self.open(support.TESTFN, "w") as f: self.assertEqual(f.errors, "strict") with self.open(support.TESTFN, "w", errors="replace") as f: self.assertEqual(f.errors, "replace") @unittest.skipUnless(threading, 'Threading required for this test.') def test_threads_write(self): # Issue6750: concurrent writes could duplicate data event = threading.Event() with self.open(support.TESTFN, "w", buffering=1) as f: def run(n): text = "Thread%03d\n" % n event.wait() f.write(text) threads = 
[threading.Thread(target=run, args=(x,)) for x in range(20)] with support.start_threads(threads, event.set): time.sleep(0.02) with self.open(support.TESTFN) as f: content = f.read() for n in range(20): self.assertEqual(content.count("Thread%03d\n" % n), 1) def test_flush_error_on_close(self): # Test that text file is closed despite failed flush # and that flush() is called before file closed. txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") closed = [] def bad_flush(): closed[:] = [txt.closed, txt.buffer.closed] raise IOError() txt.flush = bad_flush self.assertRaises(IOError, txt.close) # exception not swallowed self.assertTrue(txt.closed) self.assertTrue(txt.buffer.closed) self.assertTrue(closed) # flush() called self.assertFalse(closed[0]) # flush() called before file closed self.assertFalse(closed[1]) txt.flush = lambda: None # break reference loop def test_multi_close(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") txt.close() txt.close() txt.close() self.assertRaises(ValueError, txt.flush) def test_readonly_attributes(self): txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii") buf = self.BytesIO(self.testdata) with self.assertRaises((AttributeError, TypeError)): txt.buffer = buf def test_read_nonbytes(self): # Issue #17106 # Crash when underlying read() returns non-bytes class NonbytesStream(self.StringIO): read1 = self.StringIO.read class NonbytesStream(self.StringIO): read1 = self.StringIO.read t = self.TextIOWrapper(NonbytesStream('a')) with self.maybeRaises(TypeError): t.read(1) t = self.TextIOWrapper(NonbytesStream('a')) with self.maybeRaises(TypeError): t.readline() t = self.TextIOWrapper(NonbytesStream('a')) self.assertEqual(t.read(), u'a') def test_illegal_decoder(self): # Issue #17106 # Crash when decoder returns non-string t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n', encoding='quopri_codec') with self.maybeRaises(TypeError): t.read(1) t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n', encoding='quopri_codec') with self.maybeRaises(TypeError): t.readline() t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline='\n', encoding='quopri_codec') with self.maybeRaises(TypeError): t.read() class CTextIOWrapperTest(TextIOWrapperTest): def test_initialization(self): r = self.BytesIO(b"\xc3\xa9\n\n") b = self.BufferedReader(r, 1000) t = self.TextIOWrapper(b) self.assertRaises(TypeError, t.__init__, b, newline=42) self.assertRaises(ValueError, t.read) self.assertRaises(ValueError, t.__init__, b, newline='xyzzy') self.assertRaises(ValueError, t.read) t = self.TextIOWrapper.__new__(self.TextIOWrapper) self.assertRaises(Exception, repr, t) def test_garbage_collection(self): # C TextIOWrapper objects are collected, and collecting them flushes # all data to disk. # The Python version has __del__, so it ends in gc.garbage instead. rawio = io.FileIO(support.TESTFN, "wb") b = self.BufferedWriter(rawio) t = self.TextIOWrapper(b, encoding="ascii") t.write("456def") t.x = t wr = weakref.ref(t) del t support.gc_collect() self.assertTrue(wr() is None, wr) with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"456def") def test_rwpair_cleared_before_textio(self): # Issue 13070: TextIOWrapper's finalization would crash when called # after the reference to the underlying BufferedRWPair's writer got # cleared by the GC. 
for i in range(1000): b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()) t1 = self.TextIOWrapper(b1, encoding="ascii") b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()) t2 = self.TextIOWrapper(b2, encoding="ascii") # circular references t1.buddy = t2 t2.buddy = t1 support.gc_collect() maybeRaises = unittest.TestCase.assertRaises class PyTextIOWrapperTest(TextIOWrapperTest): @contextlib.contextmanager def maybeRaises(self, *args, **kwds): yield class IncrementalNewlineDecoderTest(unittest.TestCase): def check_newline_decoding_utf8(self, decoder): # UTF-8 specific tests for a newline decoder def _check_decode(b, s, **kwargs): # We exercise getstate() / setstate() as well as decode() state = decoder.getstate() self.assertEqual(decoder.decode(b, **kwargs), s) decoder.setstate(state) self.assertEqual(decoder.decode(b, **kwargs), s) _check_decode(b'\xe8\xa2\x88', "\u8888") _check_decode(b'\xe8', "") _check_decode(b'\xa2', "") _check_decode(b'\x88', "\u8888") _check_decode(b'\xe8', "") _check_decode(b'\xa2', "") _check_decode(b'\x88', "\u8888") _check_decode(b'\xe8', "") self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True) decoder.reset() _check_decode(b'\n', "\n") _check_decode(b'\r', "") _check_decode(b'', "\n", final=True) _check_decode(b'\r', "\n", final=True) _check_decode(b'\r', "") _check_decode(b'a', "\na") _check_decode(b'\r\r\n', "\n\n") _check_decode(b'\r', "") _check_decode(b'\r', "\n") _check_decode(b'\na', "\na") _check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n") _check_decode(b'\xe8\xa2\x88', "\u8888") _check_decode(b'\n', "\n") _check_decode(b'\xe8\xa2\x88\r', "\u8888") _check_decode(b'\n', "\n") def check_newline_decoding(self, decoder, encoding): result = [] if encoding is not None: encoder = codecs.getincrementalencoder(encoding)() def _decode_bytewise(s): # Decode one byte at a time for b in encoder.encode(s): result.append(decoder.decode(b)) else: encoder = None def _decode_bytewise(s): # Decode one char at a time for c in s: result.append(decoder.decode(c)) self.assertEqual(decoder.newlines, None) _decode_bytewise("abc\n\r") self.assertEqual(decoder.newlines, '\n') _decode_bytewise("\nabc") self.assertEqual(decoder.newlines, ('\n', '\r\n')) _decode_bytewise("abc\r") self.assertEqual(decoder.newlines, ('\n', '\r\n')) _decode_bytewise("abc") self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n')) _decode_bytewise("abc\r") self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc") decoder.reset() input = "abc" if encoder is not None: encoder.reset() input = encoder.encode(input) self.assertEqual(decoder.decode(input), "abc") self.assertEqual(decoder.newlines, None) def test_newline_decoder(self): encodings = ( # None meaning the IncrementalNewlineDecoder takes unicode input # rather than bytes input None, 'utf-8', 'latin-1', 'utf-16', 'utf-16-le', 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be', ) for enc in encodings: decoder = enc and codecs.getincrementaldecoder(enc)() decoder = self.IncrementalNewlineDecoder(decoder, translate=True) self.check_newline_decoding(decoder, enc) decoder = codecs.getincrementaldecoder("utf-8")() decoder = self.IncrementalNewlineDecoder(decoder, translate=True) self.check_newline_decoding_utf8(decoder) def test_newline_bytes(self): # Issue 5433: Excessive optimization in IncrementalNewlineDecoder def _check(dec): self.assertEqual(dec.newlines, None) self.assertEqual(dec.decode("\u0D00"), "\u0D00") self.assertEqual(dec.newlines, None) self.assertEqual(dec.decode("\u0A00"), "\u0A00") self.assertEqual(dec.newlines, 
None) dec = self.IncrementalNewlineDecoder(None, translate=False) _check(dec) dec = self.IncrementalNewlineDecoder(None, translate=True) _check(dec) class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest): pass class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest): pass # XXX Tests for open() class MiscIOTest(unittest.TestCase): def tearDown(self): support.unlink(support.TESTFN) def test___all__(self): for name in self.io.__all__: obj = getattr(self.io, name, None) self.assertTrue(obj is not None, name) if name == "open": continue elif "error" in name.lower() or name == "UnsupportedOperation": self.assertTrue(issubclass(obj, Exception), name) elif not name.startswith("SEEK_"): self.assertTrue(issubclass(obj, self.IOBase)) def test_attributes(self): f = self.open(support.TESTFN, "wb", buffering=0) self.assertEqual(f.mode, "wb") f.close() f = self.open(support.TESTFN, "U") self.assertEqual(f.name, support.TESTFN) self.assertEqual(f.buffer.name, support.TESTFN) self.assertEqual(f.buffer.raw.name, support.TESTFN) self.assertEqual(f.mode, "U") self.assertEqual(f.buffer.mode, "rb") self.assertEqual(f.buffer.raw.mode, "rb") f.close() f = self.open(support.TESTFN, "w+") self.assertEqual(f.mode, "w+") self.assertEqual(f.buffer.mode, "rb+") # Does it really matter? self.assertEqual(f.buffer.raw.mode, "rb+") g = self.open(f.fileno(), "wb", closefd=False) self.assertEqual(g.mode, "wb") self.assertEqual(g.raw.mode, "wb") self.assertEqual(g.name, f.fileno()) self.assertEqual(g.raw.name, f.fileno()) f.close() g.close() def test_io_after_close(self): for kwargs in [ {"mode": "w"}, {"mode": "wb"}, {"mode": "w", "buffering": 1}, {"mode": "w", "buffering": 2}, {"mode": "wb", "buffering": 0}, {"mode": "r"}, {"mode": "rb"}, {"mode": "r", "buffering": 1}, {"mode": "r", "buffering": 2}, {"mode": "rb", "buffering": 0}, {"mode": "w+"}, {"mode": "w+b"}, {"mode": "w+", "buffering": 1}, {"mode": "w+", "buffering": 2}, {"mode": "w+b", "buffering": 0}, ]: f = self.open(support.TESTFN, **kwargs) f.close() self.assertRaises(ValueError, f.flush) self.assertRaises(ValueError, f.fileno) self.assertRaises(ValueError, f.isatty) self.assertRaises(ValueError, f.__iter__) if hasattr(f, "peek"): self.assertRaises(ValueError, f.peek, 1) self.assertRaises(ValueError, f.read) if hasattr(f, "read1"): self.assertRaises(ValueError, f.read1, 1024) if hasattr(f, "readall"): self.assertRaises(ValueError, f.readall) if hasattr(f, "readinto"): self.assertRaises(ValueError, f.readinto, bytearray(1024)) self.assertRaises(ValueError, f.readline) self.assertRaises(ValueError, f.readlines) self.assertRaises(ValueError, f.seek, 0) self.assertRaises(ValueError, f.tell) self.assertRaises(ValueError, f.truncate) self.assertRaises(ValueError, f.write, b"" if "b" in kwargs['mode'] else "") self.assertRaises(ValueError, f.writelines, []) self.assertRaises(ValueError, next, f) def test_blockingioerror(self): # Various BlockingIOError issues self.assertRaises(TypeError, self.BlockingIOError) self.assertRaises(TypeError, self.BlockingIOError, 1) self.assertRaises(TypeError, self.BlockingIOError, 1, 2, 3, 4) self.assertRaises(TypeError, self.BlockingIOError, 1, "", None) b = self.BlockingIOError(1, "") self.assertEqual(b.characters_written, 0) class C(unicode): pass c = C("") b = self.BlockingIOError(1, c) c.b = b b.c = c wr = weakref.ref(c) del c, b support.gc_collect() self.assertTrue(wr() is None, wr) def test_abcs(self): # Test the visible base classes are ABCs. 
self.assertIsInstance(self.IOBase, abc.ABCMeta) self.assertIsInstance(self.RawIOBase, abc.ABCMeta) self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta) self.assertIsInstance(self.TextIOBase, abc.ABCMeta) def _check_abc_inheritance(self, abcmodule): with self.open(support.TESTFN, "wb", buffering=0) as f: self.assertIsInstance(f, abcmodule.IOBase) self.assertIsInstance(f, abcmodule.RawIOBase) self.assertNotIsInstance(f, abcmodule.BufferedIOBase) self.assertNotIsInstance(f, abcmodule.TextIOBase) with self.open(support.TESTFN, "wb") as f: self.assertIsInstance(f, abcmodule.IOBase) self.assertNotIsInstance(f, abcmodule.RawIOBase) self.assertIsInstance(f, abcmodule.BufferedIOBase) self.assertNotIsInstance(f, abcmodule.TextIOBase) with self.open(support.TESTFN, "w") as f: self.assertIsInstance(f, abcmodule.IOBase) self.assertNotIsInstance(f, abcmodule.RawIOBase) self.assertNotIsInstance(f, abcmodule.BufferedIOBase) self.assertIsInstance(f, abcmodule.TextIOBase) def test_abc_inheritance(self): # Test implementations inherit from their respective ABCs self._check_abc_inheritance(self) def test_abc_inheritance_official(self): # Test implementations inherit from the official ABCs of the # baseline "io" module. self._check_abc_inheritance(io) @unittest.skipUnless(fcntl, 'fcntl required for this test') def test_nonblock_pipe_write_bigbuf(self): self._test_nonblock_pipe_write(16*1024) @unittest.skipUnless(fcntl, 'fcntl required for this test') def test_nonblock_pipe_write_smallbuf(self): self._test_nonblock_pipe_write(1024) def _set_non_blocking(self, fd): flags = fcntl.fcntl(fd, fcntl.F_GETFL) self.assertNotEqual(flags, -1) res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) self.assertEqual(res, 0) def _test_nonblock_pipe_write(self, bufsize): sent = [] received = [] r, w = os.pipe() self._set_non_blocking(r) self._set_non_blocking(w) # To exercise all code paths in the C implementation we need # to play with buffer sizes. For instance, if we choose a # buffer size less than or equal to _PIPE_BUF (4096 on Linux) # then we will never get a partial write of the buffer. 
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize) wf = self.open(w, mode='wb', closefd=True, buffering=bufsize) with rf, wf: for N in 9999, 73, 7574: try: i = 0 while True: msg = bytes([i % 26 + 97]) * N sent.append(msg) wf.write(msg) i += 1 except self.BlockingIOError as e: self.assertEqual(e.args[0], errno.EAGAIN) sent[-1] = sent[-1][:e.characters_written] received.append(rf.read()) msg = b'BLOCKED' wf.write(msg) sent.append(msg) while True: try: wf.flush() break except self.BlockingIOError as e: self.assertEqual(e.args[0], errno.EAGAIN) self.assertEqual(e.characters_written, 0) received.append(rf.read()) received += iter(rf.read, None) sent, received = b''.join(sent), b''.join(received) self.assertTrue(sent == received) self.assertTrue(wf.closed) self.assertTrue(rf.closed) class CMiscIOTest(MiscIOTest): io = io class PyMiscIOTest(MiscIOTest): io = pyio @unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.') class SignalsTest(unittest.TestCase): def setUp(self): self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt) def tearDown(self): signal.signal(signal.SIGALRM, self.oldalrm) def alarm_interrupt(self, sig, frame): 1 // 0 @unittest.skipUnless(threading, 'Threading required for this test.') @unittest.skipIf(sys.platform in ('freebsd5', 'freebsd6', 'freebsd7'), 'issue #12429: skip test on FreeBSD <= 7') def check_interrupted_write(self, item, bytes, **fdopen_kwargs): """Check that a partial write, when it gets interrupted, properly invokes the signal handler, and bubbles up the exception raised in the latter.""" read_results = [] def _read(): s = os.read(r, 1) read_results.append(s) t = threading.Thread(target=_read) t.daemon = True r, w = os.pipe() try: wio = self.io.open(w, **fdopen_kwargs) t.start() signal.alarm(1) # Fill the pipe enough that the write will be blocking. # It will be interrupted by the timer armed above. Since the # other thread has read one byte, the low-level write will # return with a successful (partial) result rather than an EINTR. # The buffered IO layer must check for pending signal # handlers, which in this case will invoke alarm_interrupt(). try: with self.assertRaises(ZeroDivisionError): wio.write(item * (support.PIPE_MAX_SIZE // len(item) + 1)) finally: t.join() # We got one byte, get another one and check that it isn't a # repeat of the first one. read_results.append(os.read(r, 1)) self.assertEqual(read_results, [bytes[0:1], bytes[1:2]]) finally: os.close(w) os.close(r) # This is deliberate. If we didn't close the file descriptor # before closing wio, wio would try to flush its internal # buffer, and block again. try: wio.close() except IOError as e: if e.errno != errno.EBADF: raise def test_interrupted_write_unbuffered(self): self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0) def test_interrupted_write_buffered(self): self.check_interrupted_write(b"xy", b"xy", mode="wb") def test_interrupted_write_text(self): self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii") def check_reentrant_write(self, data, **fdopen_kwargs): def on_alarm(*args): # Will be called reentrantly from the same thread wio.write(data) 1//0 signal.signal(signal.SIGALRM, on_alarm) r, w = os.pipe() wio = self.io.open(w, **fdopen_kwargs) try: signal.alarm(1) # Either the reentrant call to wio.write() fails with RuntimeError, # or the signal handler raises ZeroDivisionError. 
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm: while 1: for i in range(100): wio.write(data) wio.flush() # Make sure the buffer doesn't fill up and block further writes os.read(r, len(data) * 100) exc = cm.exception if isinstance(exc, RuntimeError): self.assertTrue(str(exc).startswith("reentrant call"), str(exc)) finally: wio.close() os.close(r) def test_reentrant_write_buffered(self): self.check_reentrant_write(b"xy", mode="wb") def test_reentrant_write_text(self): self.check_reentrant_write("xy", mode="w", encoding="ascii") def check_interrupted_read_retry(self, decode, **fdopen_kwargs): """Check that a buffered read, when it gets interrupted (either returning a partial result or EINTR), properly invokes the signal handler and retries if the latter returned successfully.""" r, w = os.pipe() fdopen_kwargs["closefd"] = False def alarm_handler(sig, frame): os.write(w, b"bar") signal.signal(signal.SIGALRM, alarm_handler) try: rio = self.io.open(r, **fdopen_kwargs) os.write(w, b"foo") signal.alarm(1) # Expected behaviour: # - first raw read() returns partial b"foo" # - second raw read() returns EINTR # - third raw read() returns b"bar" self.assertEqual(decode(rio.read(6)), "foobar") finally: rio.close() os.close(w) os.close(r) def test_interrupterd_read_retry_buffered(self): self.check_interrupted_read_retry(lambda x: x.decode('latin1'), mode="rb") def test_interrupterd_read_retry_text(self): self.check_interrupted_read_retry(lambda x: x, mode="r") @unittest.skipUnless(threading, 'Threading required for this test.') def check_interrupted_write_retry(self, item, **fdopen_kwargs): """Check that a buffered write, when it gets interrupted (either returning a partial result or EINTR), properly invokes the signal handler and retries if the latter returned successfully.""" select = support.import_module("select") # A quantity that exceeds the buffer size of an anonymous pipe's # write end. N = support.PIPE_MAX_SIZE r, w = os.pipe() fdopen_kwargs["closefd"] = False # We need a separate thread to read from the pipe and allow the # write() to finish. This thread is started after the SIGALRM is # received (forcing a first EINTR in write()). read_results = [] write_finished = False error = [None] def _read(): try: while not write_finished: while r in select.select([r], [], [], 1.0)[0]: s = os.read(r, 1024) read_results.append(s) except BaseException as exc: error[0] = exc t = threading.Thread(target=_read) t.daemon = True def alarm1(sig, frame): signal.signal(signal.SIGALRM, alarm2) signal.alarm(1) def alarm2(sig, frame): t.start() signal.signal(signal.SIGALRM, alarm1) try: wio = self.io.open(w, **fdopen_kwargs) signal.alarm(1) # Expected behaviour: # - first raw write() is partial (because of the limited pipe buffer # and the first alarm) # - second raw write() returns EINTR (because of the second alarm) # - subsequent write()s are successful (either partial or complete) self.assertEqual(N, wio.write(item * N)) wio.flush() write_finished = True t.join() self.assertIsNone(error[0]) self.assertEqual(N, sum(len(x) for x in read_results)) finally: write_finished = True os.close(w) os.close(r) # This is deliberate. If we didn't close the file descriptor # before closing wio, wio would try to flush its internal # buffer, and could block (in case of failure). 
try: wio.close() except IOError as e: if e.errno != errno.EBADF: raise def test_interrupterd_write_retry_buffered(self): self.check_interrupted_write_retry(b"x", mode="wb") def test_interrupterd_write_retry_text(self): self.check_interrupted_write_retry("x", mode="w", encoding="latin1") class CSignalsTest(SignalsTest): io = io class PySignalsTest(SignalsTest): io = pyio # Handling reentrancy issues would slow down _pyio even more, so the # tests are disabled. test_reentrant_write_buffered = None test_reentrant_write_text = None def test_main(): tests = (CIOTest, PyIOTest, CBufferedReaderTest, PyBufferedReaderTest, CBufferedWriterTest, PyBufferedWriterTest, CBufferedRWPairTest, PyBufferedRWPairTest, CBufferedRandomTest, PyBufferedRandomTest, StatefulIncrementalDecoderTest, CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest, CTextIOWrapperTest, PyTextIOWrapperTest, CMiscIOTest, PyMiscIOTest, CSignalsTest, PySignalsTest, ) # Put the namespaces of the IO module we are testing and some useful mock # classes in the __dict__ of each test. mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO, MockNonBlockWriterIO, MockRawIOWithoutRead) all_members = io.__all__ + ["IncrementalNewlineDecoder"] c_io_ns = dict((name, getattr(io, name)) for name in all_members) py_io_ns = dict((name, getattr(pyio, name)) for name in all_members) globs = globals() c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks) py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks) # Avoid turning open into a bound method. py_io_ns["open"] = pyio.OpenWrapper for test in tests: if test.__name__.startswith("C"): for name, obj in c_io_ns.items(): setattr(test, name, obj) elif test.__name__.startswith("Py"): for name, obj in py_io_ns.items(): setattr(test, name, obj) support.run_unittest(*tests) if __name__ == "__main__": test_main()<|fim▁end|>
<|file_name|>config.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
global mods mods = []
<|file_name|>TPlane.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # This simple example shows how to do basic texture mapping. import vtk from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Load in the texture map. A texture is any unsigned char image. If it # is not of this type, you will have to map it through a lookup table # or by using vtkImageShiftScale. bmpReader = vtk.vtkBMPReader() bmpReader.SetFileName(VTK_DATA_ROOT + "/Data/masonry.bmp") atext = vtk.vtkTexture() atext.SetInputConnection(bmpReader.GetOutputPort()) atext.InterpolateOn() # Create a plane source and actor. The vtkPlanesSource generates # texture coordinates. plane = vtk.vtkPlaneSource() planeMapper = vtk.vtkPolyDataMapper() planeMapper.SetInputConnection(plane.GetOutputPort()) planeActor = vtk.vtkActor() planeActor.SetMapper(planeMapper) planeActor.SetTexture(atext) # Create the RenderWindow, Renderer and both Actors ren = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow()<|fim▁hole|>iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # Add the actors to the renderer, set the background and size ren.AddActor(planeActor) ren.SetBackground(0.1, 0.2, 0.4) renWin.SetSize(500, 500) ren.ResetCamera() cam1 = ren.GetActiveCamera() cam1.Elevation(-30) cam1.Roll(-20) ren.ResetCameraClippingRange() iren.Initialize() renWin.Render() iren.Start()<|fim▁end|>
renWin.AddRenderer(ren)
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for Numeral.js // Project: https://github.com/adamwdraper/Numeral-js // Definitions by: Vincent Bortone <https://github.com/vbortone> // Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped // http://numeraljs.com/#locales interface NumeralJSLocale { delimiters: { thousands: string; decimal: string; }; abbreviations: { thousand: string; million: string; billion: string; trillion: string; }; ordinal(num: number): string; currency: { symbol: string; }; } type RoundingFunction = (value: number) => number; // http://numeraljs.com/#custom-formats<|fim▁hole|> format: RegExp, unformat: RegExp, }, format: (value: any, format: string, roundingFunction: RoundingFunction) => string, unformat: (value: string) => number } type RegisterType = 'format' | 'locale'; // http://numeraljs.com/#use-it interface Numeral { (value?: any): Numeral; version: string; isNumeral: boolean; /** * This function sets the current locale. If no arguments are passed in, * it will simply return the current global locale key. */ locale(key?: string): string; /** * Registers a language definition or a custom format definition. * * @param what Allowed values are: either 'format' or 'locale' * @param key The key of the registerd type, e.g. 'de' for a german locale definition * @param value The locale definition or the format definitiion */ register(what: RegisterType, key: string, value: NumeralJSLocale | NumeralJsFormat): NumeralJSLocale | NumeralJsFormat; zeroFormat(format: string): void; nullFormat(format: string): void; defaultFormat(format: string): void; clone(): Numeral; format(inputString?: string, roundingFunction?: RoundingFunction): string; formatCurrency(inputString?: string): string; unformat(inputString: string): number; value(): number; valueOf(): number; set (value: any): Numeral; add(value: any): Numeral; subtract(value: any): Numeral; multiply(value: any): Numeral; divide(value: any): Numeral; difference(value: any): number; validate(value: any, culture: any): boolean; } declare var numeral: Numeral; /** * Usage: <code>import * as numeral from 'numeral'</code> */ declare module "numeral" { export = numeral; }<|fim▁end|>
interface NumeralJsFormat { regexps: {
<|file_name|>unique-deref.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed<|fim▁hole|>// except according to those terms. #![allow(unknown_features)] #![feature(box_syntax)] pub fn main() { let i: Box<_> = box 100; assert_eq!(*i, 100); }<|fim▁end|>
<|file_name|>content.py<|end_file_name|><|fim▁begin|>from gettext import gettext as _ from pulp.client.commands import options from pulp.client.commands.criteria import DisplayUnitAssociationsCommand from pulp.client.commands.unit import UnitCopyCommand, UnitRemoveCommand from pulp_docker.common import constants DESC_COPY_MANIFESTS = _('copies manifests from one repository into another') DESC_COPY_TAGS = _('copies tags from one repository into another') DESC_REMOVE_MANIFESTS = _('remove manifests from a repository') DESC_REMOVE_TAGS = _('remove tags from a repository') DESC_SEARCH_MANIFESTS = _('search for manifests in a repository') DESC_SEARCH_TAGS = _('search for tags in a repository') FORMAT_ERR = _('The docker formatter can not process %s units.') MANIFEST_AND_BLOB_TEMPLATE = '%(digest)s' TAG_TEMPLATE = '%(name)s' def get_formatter_for_type(type_id): """ Returns a formatter that can be used to format the unit key of a docker tag, manifest, or blob for display purposes. :param type_id: A unit type ID. :type type_id: str :return: A formatter. :rtype: callable :raises ValueError: when the type_id is not supported. """ if type_id == constants.TAG_TYPE_ID: return lambda u: TAG_TEMPLATE % u elif type_id in [constants.MANIFEST_TYPE_ID, constants.BLOB_TYPE_ID]: return lambda u: MANIFEST_AND_BLOB_TEMPLATE % u else: raise ValueError(FORMAT_ERR % type_id) class TagSearchCommand(DisplayUnitAssociationsCommand): """ Command used to search for tag units in a repository. """ def __init__(self, context): """ :param context: A client context. :type context: pulp.client.extensions.core.ClientContext """ super(TagSearchCommand, self).__init__( name='tag', description=DESC_SEARCH_TAGS, method=self.run) self.context = context self.prompt = context.prompt def run(self, **kwargs):<|fim▁hole|> :param kwargs: the search parameters for finding docker tags :type kwargs: dict """ repo_id = kwargs.pop(options.OPTION_REPO_ID.keyword) kwargs['type_ids'] = [constants.TAG_TYPE_ID] reply = self.context.server.repo_unit.search(repo_id, **kwargs) tags = reply.response_body self.prompt.render_document_list(tags) class TagCopyCommand(UnitCopyCommand): """ Command used to copy tag units between repositories. """ def __init__(self, context): """ :param context: A client context. :type context: pulp.client.extensions.core.ClientContext """ super(TagCopyCommand, self).__init__( context, name='tag', description=DESC_COPY_TAGS, method=self.run, type_id=constants.TAG_TYPE_ID) def get_formatter_for_type(self, type_id): """ Returns a formatter that can be used to format the unit key of a docker tag or blob for display purposes. :param type_id: A unit type ID. :type type_id: str :return: A formatter. :rtype: callable :raises ValueError: when the type_id is not supported. """ return get_formatter_for_type(type_id) class TagRemoveCommand(UnitRemoveCommand): """ Command used to remove tag units from a repository. """ def __init__(self, context): """ :param context: A client context. :type context: pulp.client.extensions.core.ClientContext """ super(TagRemoveCommand, self).__init__( name='tag', description=DESC_REMOVE_TAGS, context=context, method=self.run, type_id=constants.TAG_TYPE_ID) def get_formatter_for_type(self, type_id): """ Returns a formatter that can be used to format the unit key of a docker tag or blob for display purposes. :param type_id: A unit type ID. :type type_id: str :return: A formatter. :rtype: callable :raises ValueError: when the type_id is not supported. 
""" return get_formatter_for_type(type_id) class ManifestSearchCommand(DisplayUnitAssociationsCommand): """ Command used to search for manifest units in a repository. """ def __init__(self, context): """ :param context: A client context. :type context: pulp.client.extensions.core.ClientContext """ super(ManifestSearchCommand, self).__init__( name='manifest', description=DESC_SEARCH_MANIFESTS, method=self.run) self.context = context self.prompt = context.prompt def run(self, **kwargs): """ Print a list of all the manifests matching the search parameters. :param kwargs: the search parameters for finding docker manifests :type kwargs: dict """ repo_id = kwargs.pop(options.OPTION_REPO_ID.keyword) kwargs['type_ids'] = [constants.MANIFEST_TYPE_ID] reply = self.context.server.repo_unit.search(repo_id, **kwargs) manifests = reply.response_body self.prompt.render_document_list(manifests) class ManifestCopyCommand(UnitCopyCommand): """ Command used to copy manifest units between repositories. """ def __init__(self, context): """ :param context: A client context. :type context: pulp.client.extensions.core.ClientContext """ super(ManifestCopyCommand, self).__init__( context, name='manifest', description=DESC_COPY_MANIFESTS, method=self.run, type_id=constants.MANIFEST_TYPE_ID) def get_formatter_for_type(self, type_id): """ Returns a formatter that can be used to format the unit key of a docker manifest or blob for display purposes. :param type_id: A unit type ID. :type type_id: str :return: A formatter. :rtype: callable :raises ValueError: when the type_id is not supported. """ return get_formatter_for_type(type_id) class ManifestRemoveCommand(UnitRemoveCommand): """ Command used to remove manifest units from a repository. """ def __init__(self, context): """ :param context: A client context. :type context: pulp.client.extensions.core.ClientContext """ super(ManifestRemoveCommand, self).__init__( name='manifest', description=DESC_REMOVE_MANIFESTS, context=context, method=self.run, type_id=constants.MANIFEST_TYPE_ID) def get_formatter_for_type(self, type_id): """ Returns a formatter that can be used to format the unit key of a docker manifest or blob for display purposes. :param type_id: A unit type ID. :type type_id: str :return: A formatter. :rtype: callable :raises ValueError: when the type_id is not supported. """ return get_formatter_for_type(type_id)<|fim▁end|>
""" Print a list of all the tags matching the search parameters.
<|file_name|>zyx.py<|end_file_name|><|fim▁begin|>""" /****************************************************************************** This source file is part of the Avogadro project. Copyright 2013 Kitware, Inc. This source code is released under the New BSD License, (the "License"). Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and<|fim▁hole|>""" import argparse import json import sys def getMetaData(): metaData = {} metaData['inputFormat'] = 'xyz' metaData['outputFormat'] = 'xyz' metaData['operations'] = ['read', 'write'] metaData['identifier'] = 'ZYX Example Format' metaData['name'] = 'ZYX' metaData['description'] = "Mostly useless file format that reads xyz-style " +\ "files with reversed coordinates. Demonstrates " +\ "the implementation of a user-scripted file format." metaData['fileExtensions'] = ['zyx'] metaData['mimeTypes'] = ['chemical/x-zyx'] return metaData def write(): result = "" # Just copy the first two lines: numAtoms and comment/title result += sys.stdin.readline() result += sys.stdin.readline() for line in sys.stdin: words = line.split() result += '%-3s %9.5f %9.5f %9.5f' %\ (words[0], float(words[3]), float(words[2]), float(words[1])) if len(words) > 4: result += words[4:].join(' ') result += '\n' return result def read(): result = "" # Just copy the first two lines: numAtoms and comment/title result += sys.stdin.readline() result += sys.stdin.readline() for line in sys.stdin: words = line.split() result += '%-3s %9.5f %9.5f %9.5f' %\ (words[0], float(words[3]), float(words[2]), float(words[1])) if len(words) > 4: result += words[4:].join(' ') result += '\n' return result if __name__ == "__main__": parser = argparse.ArgumentParser('Example file format script.') parser.add_argument('--metadata', action='store_true') parser.add_argument('--read', action='store_true') parser.add_argument('--write', action='store_true') parser.add_argument('--display-name', action='store_true') parser.add_argument('--lang', nargs='?', default='en') args = vars(parser.parse_args()) if args['metadata']: print(json.dumps(getMetaData())) elif args['display_name']: print(getMetaData()['name']) elif args['read']: print(read()) elif args['write']: print(write())<|fim▁end|>
limitations under the License. ******************************************************************************/
<|file_name|>olLayerScripts.py<|end_file_name|><|fim▁begin|>import re import traceback import os import codecs from urlparse import parse_qs from PyQt4.QtCore import QCoreApplication from qgis.core import (QgsRenderContext, QgsSingleSymbolRendererV2, QgsCategorizedSymbolRendererV2, QgsGraduatedSymbolRendererV2, QgsHeatmapRenderer, QgsCoordinateReferenceSystem, QgsCoordinateTransform, QgsMessageLog) from utils import safeName, is25d, BLEND_MODES from basemaps import basemapOL def writeLayersAndGroups(layers, groups, visible, folder, popup, settings, json, matchCRS, clustered, iface, restrictToExtent, extent): canvas = iface.mapCanvas() basemapList = settings["Appearance"]["Base layer"] basemaps = [basemapOL()[item] for _, item in enumerate(basemapList)] if len(basemapList) > 1: baseGroup = "Base maps" else: baseGroup = "" baseLayer = """var baseLayer = new ol.layer.Group({ 'title': '%s', layers: [%s\n] });""" % (baseGroup, ','.join(basemaps)) layerVars = "" layer_names_id = {} for count, (layer, encode2json, cluster) in enumerate(zip(layers, json, clustered)): layer_names_id[layer.id()] = str(count) try: if is25d(layer, canvas, restrictToExtent, extent): pass else: layerVars += "\n".join([layerToJavascript(iface, layer, encode2json, matchCRS, cluster, restrictToExtent, extent, count)]) except: layerVars += "\n".join([layerToJavascript(iface, layer, encode2json, matchCRS, cluster, restrictToExtent, extent, count)]) groupVars = "" groupedLayers = {} for group, groupLayers in groups.iteritems(): groupVars += ('''var %s = new ol.layer.Group({ layers: [%s], title: "%s"});\n''' % ("group_" + safeName(group), ",".join(["lyr_" + safeName(layer.name()) + layer_names_id[layer.id()] for layer in groupLayers]), group)) for layer in groupLayers: groupedLayers[layer.id()] = safeName(group) mapLayers = ["baseLayer"] usedGroups = [] osmb = "" for count, layer in enumerate(layers): try: renderer = layer.rendererV2() if is25d(layer, canvas, restrictToExtent, extent): shadows = "" renderer = layer.rendererV2() renderContext = QgsRenderContext.fromMapSettings( canvas.mapSettings()) fields = layer.pendingFields() renderer.startRender(renderContext, fields) for feat in layer.getFeatures(): if isinstance(renderer, QgsCategorizedSymbolRendererV2): classAttribute = renderer.classAttribute() attrValue = feat.attribute(classAttribute) catIndex = renderer.categoryIndexForValue(attrValue) categories = renderer.categories() symbol = categories[catIndex].symbol() elif isinstance(renderer, QgsGraduatedSymbolRendererV2): classAttribute = renderer.classAttribute() attrValue = feat.attribute(classAttribute) ranges = renderer.ranges() for range in ranges: if (attrValue >= range.lowerValue() and attrValue <= range.upperValue()): symbol = range.symbol().clone() else: symbol = renderer.symbolForFeature2(feat, renderContext) symbolLayer = symbol.symbolLayer(0) if not symbolLayer.paintEffect().effectList()[0].enabled(): shadows = "'2015-07-15 10:00:00'" renderer.stopRender(renderContext) osmb = """ var osmb = new OSMBuildings(map).date(new Date({shadows})); osmb.set(geojson_{sln}{count});""".format(shadows=shadows, sln=safeName(layer.name()), count=unicode(count)) else: mapLayers.append("lyr_" + safeName(layer.name()) + unicode(count)) except: QgsMessageLog.logMessage(traceback.format_exc(), "qgis2web", level=QgsMessageLog.CRITICAL) mapLayers.append("lyr_" + safeName(layer.name()) + unicode(count)) visibility = "" for layer, v in zip(mapLayers[1:], visible): visibility += "\n".join(["%s.setVisible(%s);" % (layer, 
unicode(v).lower())]) group_list = ["baseLayer"] if len(basemapList) else [] no_group_list = [] for count, layer in enumerate(layers): try: if is25d(layer, canvas, restrictToExtent, extent): pass else: if layer.id() in groupedLayers: groupName = groupedLayers[layer.id()] if groupName not in usedGroups: group_list.append("group_" + safeName(groupName)) usedGroups.append(groupName) else: no_group_list.append("lyr_" + safeName(layer.name()) + unicode(count)) except: if layer.id() in groupedLayers: groupName = groupedLayers[layer.id()] if groupName not in usedGroups: group_list.append("group_" + safeName(groupName)) usedGroups.append(groupName) else: no_group_list.append("lyr_" + safeName(layer.name()) + unicode(count)) layersList = [] for layer in (group_list + no_group_list): layersList.append(layer) layersListString = "var layersList = [" + ",".join(layersList) + "];" fieldAliases = "" fieldImages = "" fieldLabels = "" blend_mode = "" for count, (layer, labels) in enumerate(zip(layers, popup)): sln = safeName(layer.name()) + unicode(count) if layer.type() == layer.VectorLayer and not is25d(layer, canvas, restrictToExtent, extent): fieldList = layer.pendingFields() aliasFields = "" imageFields = "" labelFields = "" for field, label in zip(labels.keys(), labels.values()): labelFields += "'%(field)s': '%(label)s', " % ( {"field": field, "label": label}) labelFields = "{%(labelFields)s});\n" % ( {"labelFields": labelFields}) labelFields = "lyr_%(name)s.set('fieldLabels', " % ( {"name": sln}) + labelFields fieldLabels += labelFields for f in fieldList: fieldIndex = fieldList.indexFromName(unicode(f.name())) aliasFields += "'%(field)s': '%(alias)s', " % ( {"field": f.name(), "alias": layer.attributeDisplayName(fieldIndex)}) widget = layer.editFormConfig().widgetType(fieldIndex) imageFields += "'%(field)s': '%(image)s', " % ( {"field": f.name(), "image": widget}) aliasFields = "{%(aliasFields)s});\n" % ( {"aliasFields": aliasFields}) aliasFields = "lyr_%(name)s.set('fieldAliases', " % ( {"name": sln}) + aliasFields fieldAliases += aliasFields imageFields = "{%(imageFields)s});\n" % ( {"imageFields": imageFields}) imageFields = "lyr_%(name)s.set('fieldImages', " % ( {"name": sln}) + imageFields fieldImages += imageFields blend_mode = """lyr_%(name)s.on('precompose', function(evt) { evt.context.globalCompositeOperation = '%(blend)s'; });""" % ( {"name": sln, "blend": BLEND_MODES[layer.blendMode()]}) path = os.path.join(folder, "layers", "layers.js") with codecs.open(path, "w", "utf-8") as f: if basemapList: f.write(baseLayer + "\n") f.write(layerVars + "\n") f.write(groupVars + "\n") f.write(visibility + "\n") f.write(layersListString + "\n") f.write(fieldAliases) f.write(fieldImages) f.write(fieldLabels) f.write(blend_mode) return osmb def layerToJavascript(iface, layer, encode2json, matchCRS, cluster, restrictToExtent, extent, count): if layer.hasScaleBasedVisibility(): if layer.minimumScale() != 0: minRes = 1 / ((1 / layer.minimumScale()) * 39.37 * 90.7) minResolution = "\nminResolution:%s,\n" % unicode(minRes) else: minResolution = "" if layer.maximumScale() != 0: maxRes = 1 / ((1 / layer.maximumScale()) * 39.37 * 90.7) maxResolution = "maxResolution:%s,\n" % unicode(maxRes) else: maxResolution = "" else: minResolution = "" maxResolution = "" layerName = safeName(layer.name()) + unicode(count) attrText = layer.attribution() attrUrl = layer.attributionUrl() layerAttr = '<a href="%s">%s</a>' % (attrUrl, attrText) if layer.type() == layer.VectorLayer and not is25d(layer, iface.mapCanvas(), 
restrictToExtent, extent): renderer = layer.rendererV2() if (cluster and isinstance(renderer, QgsSingleSymbolRendererV2)): cluster = True else: cluster = False if isinstance(renderer, QgsHeatmapRenderer): pointLayerType = "Heatmap" hmRadius = renderer.radius() colorRamp = renderer.colorRamp() hmStart = colorRamp.color1().name() hmEnd = colorRamp.color2().name() hmRamp = "['" + hmStart + "', " hmStops = colorRamp.stops() for stop in hmStops: hmRamp += "'" + stop.color.name() + "', " hmRamp += "'" + hmEnd + "']" hmWeight = renderer.weightExpression() hmWeightId = layer.fieldNameIndex(hmWeight) hmWeightMax = layer.maximumValue(hmWeightId) else: pointLayerType = "Vector" if matchCRS: mapCRS = iface.mapCanvas().mapSettings().destinationCrs().authid() crsConvert = """ {dataProjection: 'EPSG:4326', featureProjection: '%(d)s'}""" % { "d": mapCRS} else: crsConvert = """ {dataProjection: 'EPSG:4326', featureProjection: 'EPSG:3857'}""" if layer.providerType() == "WFS" and not encode2json: layerCode = '''var format_%(n)s = new ol.format.GeoJSON(); var jsonSource_%(n)s = new ol.source.Vector({ attributions: [new ol.Attribution({html: '%(layerAttr)s'})], format: format_%(n)s });''' % {"n": layerName, "layerAttr": layerAttr} if cluster: layerCode += '''cluster_%(n)s = new ol.source.Cluster({ distance: 10, source: jsonSource_%(n)s });''' % {"n": layerName} layerCode += '''var lyr_%(n)s = new ol.layer.Vector({ source: ''' % {"n": layerName} if cluster: layerCode += 'cluster_%(n)s,' % {"n": layerName} else: layerCode += 'jsonSource_%(n)s,' % {"n": layerName} layerCode += '''%(min)s %(max)s style: style_%(n)s, title: "%(name)s" }); function get%(n)sJson(geojson) { var features_%(n)s = format_%(n)s.readFeatures(geojson); jsonSource_%(n)s.addFeatures(features_%(n)s); }''' % { "name": layer.name(), "n": layerName, "min": minResolution, "max": maxResolution} return layerCode else: layerCode = '''var format_%(n)s = new ol.format.GeoJSON(); var features_%(n)s = format_%(n)s.readFeatures(geojson_%(n)s, %(crs)s); var jsonSource_%(n)s = new ol.source.Vector({ attributions: [new ol.Attribution({html: '%(layerAttr)s'})], }); jsonSource_%(n)s.addFeatures(features_%(n)s);''' % {"n": layerName, "crs": crsConvert, "layerAttr": layerAttr} if cluster: layerCode += '''cluster_%(n)s = new ol.source.Cluster({ distance: 10, source: jsonSource_%(n)s });''' % {"n": layerName} layerCode += '''var lyr_%(n)s = new ol.layer.%(t)s({ source:''' % {"n": layerName, "t": pointLayerType} if cluster: layerCode += 'cluster_%(n)s,' % {"n": layerName} else: layerCode += 'jsonSource_%(n)s,' % {"n": layerName} layerCode += '''%(min)s %(max)s''' % {"min": minResolution, "max": maxResolution} if pointLayerType == "Vector": layerCode += ''' style: style_%(n)s,''' % {"n": layerName} else: layerCode += ''' radius: %(hmRadius)d * 2, gradient: %(hmRamp)s, blur: 15, shadow: 250,''' % {"hmRadius": hmRadius, "hmRamp": hmRamp} if hmWeight != "": layerCode += ''' weight: function(feature){ var weightField = '%(hmWeight)s'; var featureWeight = feature.get(weightField); var maxWeight = %(hmWeightMax)d; var calibratedWeight = featureWeight/maxWeight; return calibratedWeight; },''' % {"hmWeight": hmWeight, "hmWeightMax": hmWeightMax} layerCode += ''' title: "%(name)s" });''' % {"name": layer.name()} return layerCode elif layer.type() == layer.RasterLayer: if layer.providerType().lower() == "wms": source = layer.source() opacity = layer.renderer().opacity() d = parse_qs(source) if "type" in d and d["type"][0] == "xyz": return """ var lyr_%s = new ol.layer.Tile({ 
'title': '%s', 'type': 'base', 'opacity': %f, %s %s source: new ol.source.XYZ({ attributions: [new ol.Attribution({html: '%s'})], url: '%s' }) });""" % (layerName, layerName, opacity, minResolution, maxResolution, layerAttr, d["url"][0]) elif "tileMatrixSet" in d: layerId = d["layers"][0] url = d["url"][0] format = d["format"][0] style = d["styles"][0] return ''' var projection_%(n)s = ol.proj.get('EPSG:3857'); var projectionExtent_%(n)s = projection_%(n)s.getExtent(); var size_%(n)s = ol.extent.getWidth(projectionExtent_%(n)s) / 256; var resolutions_%(n)s = new Array(14); var matrixIds_%(n)s = new Array(14); for (var z = 0; z < 14; ++z) { // generate resolutions and matrixIds arrays for this WMTS resolutions_%(n)s[z] = size_%(n)s / Math.pow(2, z); matrixIds_%(n)s[z] = z; } var lyr_%(n)s = new ol.layer.Tile({ source: new ol.source.WMTS(({ url: "%(url)s", attributions: [new ol.Attribution({html: '%(layerAttr)s'})], "layer": "%(layerId)s", "TILED": "true", matrixSet: 'EPSG:3857', format: '%(format)s', projection: projection_%(n)s, tileGrid: new ol.tilegrid.WMTS({ origin: ol.extent.getTopLeft(projectionExtent_%(n)s), resolutions: resolutions_%(n)s, matrixIds: matrixIds_%(n)s }), style: '%(style)s', wrapX: true, "VERSION": "1.0.0", })), title: "%(name)s", opacity: %(opacity)s, %(minRes)s %(maxRes)s });''' % {"layerId": layerId, "url": url, "layerAttr": layerAttr, "format": format, "n": layerName, "name": layer.name(), "opacity": opacity, "style": style, "minRes": minResolution, "maxRes": maxResolution} else: layers = re.search(r"layers=(.*?)(?:&|$)", source).groups(0)[0] url = re.search(r"url=(.*?)(?:&|$)", source).groups(0)[0] metadata = layer.metadata() needle = "<tr><td>%s</td><td>(.+?)</td>" % ( QCoreApplication.translate("QgsWmsProvider", "WMS Version")) result = re.search(needle, metadata) if result: version = result.group(1) else: version = "" return '''var lyr_%(n)s = new ol.layer.Tile({ source: new ol.source.TileWMS(({ url: "%(url)s", attributions: [new ol.Attribution({html: '%(layerAttr)s'})],<|fim▁hole|> })), title: "%(name)s", opacity: %(opacity)f, %(minRes)s %(maxRes)s });''' % {"layers": layers, "url": url, "layerAttr": layerAttr, "n": layerName, "name": layer.name(), "version": version, "opacity": opacity, "minRes": minResolution, "maxRes": maxResolution} elif layer.providerType().lower() == "gdal": provider = layer.dataProvider() crsSrc = layer.crs() crsDest = QgsCoordinateReferenceSystem(3857) xform = QgsCoordinateTransform(crsSrc, crsDest) extentRep = xform.transform(layer.extent()) sExtent = "[%f, %f, %f, %f]" % (extentRep.xMinimum(), extentRep.yMinimum(), extentRep.xMaximum(), extentRep.yMaximum()) return '''var lyr_%(n)s = new ol.layer.Image({ opacity: 1, title: "%(name)s", %(minRes)s %(maxRes)s source: new ol.source.ImageStatic({ url: "./layers/%(n)s.png", attributions: [new ol.Attribution({html: '%(layerAttr)s'})], projection: 'EPSG:3857', alwaysInRange: true, //imageSize: [%(col)d, %(row)d], imageExtent: %(extent)s }) });''' % {"n": layerName, "extent": sExtent, "col": provider.xSize(), "name": layer.name(), "minRes": minResolution, "maxRes": maxResolution, "layerAttr": layerAttr, "row": provider.ySize()}<|fim▁end|>
params: { "LAYERS": "%(layers)s", "TILED": "true", "VERSION": "%(version)s"},
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|># it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Refer to the README and COPYING files for full details of the license # from __future__ import absolute_import from __future__ import division<|fim▁end|>
# # Copyright 2017 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify
<|file_name|>sound_alsa.cpp<|end_file_name|><|fim▁begin|>/* =========================================================================== Doom 3 GPL Source Code Copyright (C) 1999-2011 id Software LLC, a ZeniMax Media company. This file is part of the Doom 3 GPL Source Code (?Doom 3 Source Code?). Doom 3 Source Code is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Doom 3 Source Code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Doom 3 Source Code. If not, see <http://www.gnu.org/licenses/>. In addition, the Doom 3 Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 Source Code. If not, please request a copy in writing from id Software at the address below. If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA. =========================================================================== */ #include "../../idlib/precompiled.h" #include "../../sound/snd_local.h" #include "../posix/posix_public.h" #include "sound.h" #include <dlfcn.h> static idCVar s_alsa_pcm( "s_alsa_pcm", "default", CVAR_SYSTEM | CVAR_ARCHIVE, "which alsa pcm device to use. default, hwplug, hw.. see alsa docs" ); static idCVar s_alsa_lib( "s_alsa_lib", "libasound.so.2", CVAR_SYSTEM | CVAR_ARCHIVE, "alsa client sound library" ); /* =============== idAudioHardwareALSA::DLOpen =============== */ bool idAudioHardwareALSA::DLOpen() { const char* version; if( m_handle ) { return true; } common->Printf( "dlopen(%s)\n", s_alsa_lib.GetString() ); if( !( m_handle = dlopen( s_alsa_lib.GetString(), RTLD_NOW | RTLD_GLOBAL ) ) ) { common->Printf( "dlopen(%s) failed: %s\n", s_alsa_lib.GetString(), dlerror() ); return false; } // print the version if available id_snd_asoundlib_version = ( pfn_snd_asoundlib_version )dlsym( m_handle, "snd_asoundlib_version" ); if( !id_snd_asoundlib_version ) { common->Printf( "dlsym(\"snd_asoundlib_version\") failed: %s\n", dlerror() ); common->Warning( "please consider upgrading alsa to a more recent version." 
); } else { version = id_snd_asoundlib_version(); common->Printf( "asoundlib version: %s\n", version ); } // dlsym the symbols ALSA_DLSYM( snd_pcm_avail_update ); ALSA_DLSYM( snd_pcm_close ); ALSA_DLSYM( snd_pcm_hw_params ); ALSA_DLSYM( snd_pcm_hw_params_any ); ALSA_DLSYM( snd_pcm_hw_params_get_buffer_size ); ALSA_DLSYM( snd_pcm_hw_params_set_access ); ALSA_DLSYM( snd_pcm_hw_params_set_buffer_size_min ); ALSA_DLSYM( snd_pcm_hw_params_set_channels ); ALSA_DLSYM( snd_pcm_hw_params_set_format ); ALSA_DLSYM( snd_pcm_hw_params_set_rate ); ALSA_DLSYM( snd_pcm_hw_params_sizeof ); ALSA_DLSYM( snd_pcm_open ); ALSA_DLSYM( snd_pcm_prepare ); ALSA_DLSYM( snd_pcm_state ); ALSA_DLSYM( snd_pcm_writei ); ALSA_DLSYM( snd_strerror ); return true; } /* =============== idAudioHardwareALSA::Release =============== */ void idAudioHardwareALSA::Release() { if( m_pcm_handle ) { common->Printf( "close pcm\n" ); id_snd_pcm_close( m_pcm_handle ); m_pcm_handle = NULL; } if( m_buffer ) { free( m_buffer ); m_buffer = NULL; } if( m_handle ) { common->Printf( "dlclose\n" ); dlclose( m_handle ); m_handle = NULL; } } /* ================= idAudioHardwareALSA::InitFailed ================= */ void idAudioHardwareALSA::InitFailed() { Release(); cvarSystem->SetCVarBool( "s_noSound", true ); common->Warning( "sound subsystem disabled\n" ); common->Printf( "--------------------------------------\n" ); } /* ===================== idAudioHardwareALSA::Initialize ===================== */ bool idAudioHardwareALSA::Initialize() { int err; common->Printf( "------ Alsa Sound Initialization -----\n" ); if( !DLOpen() ) { InitFailed(); return false; } if( ( err = id_snd_pcm_open( &m_pcm_handle, s_alsa_pcm.GetString(), SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK ) ) < 0 ) { common->Printf( "snd_pcm_open SND_PCM_STREAM_PLAYBACK '%s' failed: %s\n", s_alsa_pcm.GetString(), id_snd_strerror( err ) ); InitFailed(); return false; } common->Printf( "opened Alsa PCM device %s for playback\n", s_alsa_pcm.GetString() ); // set hardware parameters ---------------------------------------------------------------------- // init hwparams with the full configuration space snd_pcm_hw_params_t* hwparams; // this one is a define id_snd_pcm_hw_params_alloca( &hwparams ); if( ( err = id_snd_pcm_hw_params_any( m_pcm_handle, hwparams ) ) < 0 ) { common->Printf( "cannot configure the PCM device: %s\n", id_snd_strerror( err ) ); InitFailed(); return false; } if( ( err = id_snd_pcm_hw_params_set_access( m_pcm_handle, hwparams, SND_PCM_ACCESS_RW_INTERLEAVED ) ) < 0 ) { common->Printf( "SND_PCM_ACCESS_RW_INTERLEAVED failed: %s\n", id_snd_strerror( err ) ); InitFailed(); return false; } if( ( err = id_snd_pcm_hw_params_set_format( m_pcm_handle, hwparams, SND_PCM_FORMAT_S16_LE ) ) < 0 ) { common->Printf( "SND_PCM_FORMAT_S16_LE failed: %s\n", id_snd_strerror( err ) ); InitFailed(); return false; } // channels // sanity over number of speakers if( idSoundSystemLocal::s_numberOfSpeakers.GetInteger() != 6 && idSoundSystemLocal::s_numberOfSpeakers.GetInteger() != 2 ) { common->Warning( "invalid value for s_numberOfSpeakers. 
Use either 2 or 6" ); idSoundSystemLocal::s_numberOfSpeakers.SetInteger( 2 ); } m_channels = idSoundSystemLocal::s_numberOfSpeakers.GetInteger(); if( ( err = id_snd_pcm_hw_params_set_channels( m_pcm_handle, hwparams, m_channels ) ) < 0 ) { common->Printf( "error setting %d channels: %s\n", m_channels, id_snd_strerror( err ) ); if( idSoundSystemLocal::s_numberOfSpeakers.GetInteger() != 2 ) { // fallback to stereo if that works m_channels = 2; if( ( err = id_snd_pcm_hw_params_set_channels( m_pcm_handle, hwparams, m_channels ) ) < 0 ) { common->Printf( "fallback to stereo failed: %s\n", id_snd_strerror( err ) ); InitFailed(); return false; } else { common->Printf( "fallback to stereo\n" ); idSoundSystemLocal::s_numberOfSpeakers.SetInteger( 2 ); } } else { InitFailed(); return false; } } // set sample rate (frequency) if( ( err = id_snd_pcm_hw_params_set_rate( m_pcm_handle, hwparams, PRIMARYFREQ, 0 ) ) < 0 ) { common->Printf( "failed to set 44.1KHz rate: %s - try ( +set s_alsa_pcm plughw:0 ? )\n", id_snd_strerror( err ) ); InitFailed(); return false; } // have enough space in the input buffer for our MIXBUFFER_SAMPLE feedings and async ticks snd_pcm_uframes_t frames; frames = MIXBUFFER_SAMPLES + MIXBUFFER_SAMPLES / 3; if( ( err = id_snd_pcm_hw_params_set_buffer_size_min( m_pcm_handle, hwparams, &frames ) ) < 0 ) { common->Printf( "buffer size select failed: %s\n", id_snd_strerror( err ) ); InitFailed(); return false; } // apply parameters if( ( err = id_snd_pcm_hw_params( m_pcm_handle, hwparams ) ) < 0 ) { common->Printf( "snd_pcm_hw_params failed: %s\n", id_snd_strerror( err ) ); InitFailed(); return false; } // check the buffer size if( ( err = id_snd_pcm_hw_params_get_buffer_size( hwparams, &frames ) ) < 0 ) { common->Printf( "snd_pcm_hw_params_get_buffer_size failed: %s\n", id_snd_strerror( err ) ); } else { common->Printf( "device buffer size: %lu frames ( %lu bytes )\n", ( long unsigned int )frames, frames * m_channels * 2 ); } // TODO: can use swparams to setup the device so it doesn't underrun but rather loops over // snd_pcm_sw_params_set_stop_threshold // To get alsa to just loop on underruns. set the swparam stop_threshold to equal buffer size. The sound buffer will just loop and never throw an xrun. 
// allocate the final mix buffer m_buffer_size = MIXBUFFER_SAMPLES * m_channels * 2; m_buffer = malloc( m_buffer_size ); common->Printf( "allocated a mix buffer of %d bytes\n", m_buffer_size ); <|fim▁hole|> // verbose the state snd_pcm_state_t curstate = id_snd_pcm_state( m_pcm_handle ); assert( curstate == SND_PCM_STATE_PREPARED ); #endif common->Printf( "--------------------------------------\n" ); return true; } /* =============== idAudioHardwareALSA::~idAudioHardwareALSA =============== */ idAudioHardwareALSA::~idAudioHardwareALSA() { common->Printf( "----------- Alsa Shutdown ------------\n" ); Release(); common->Printf( "--------------------------------------\n" ); } /* ================= idAudioHardwareALSA::GetMixBufferSize ================= */ int idAudioHardwareALSA::GetMixBufferSize() { return m_buffer_size; } /* ================= idAudioHardwareALSA::GetMixBuffer ================= */ short* idAudioHardwareALSA::GetMixBuffer() { return ( short* )m_buffer; } /* =============== idAudioHardwareALSA::Flush =============== */ bool idAudioHardwareALSA::Flush() { int ret; snd_pcm_state_t state; state = id_snd_pcm_state( m_pcm_handle ); if( state != SND_PCM_STATE_RUNNING && state != SND_PCM_STATE_PREPARED ) { if( ( ret = id_snd_pcm_prepare( m_pcm_handle ) ) < 0 ) { Sys_Printf( "failed to recover from SND_PCM_STATE_XRUN: %s\n", id_snd_strerror( ret ) ); cvarSystem->SetCVarBool( "s_noSound", true ); return false; } Sys_Printf( "preparing audio device for output\n" ); } Write( true ); } /* =============== idAudioHardwareALSA::Write rely on m_freeWriteChunks which has been set in Flush() before engine did the mixing for this MIXBUFFER_SAMPLE =============== */ void idAudioHardwareALSA::Write( bool flushing ) { if( !flushing && m_remainingFrames ) { // if we write after a new mixing loop, we should have m_writeChunk == 0 // otherwise that last remaining chunk that was never flushed out to the audio device has just been overwritten Sys_Printf( "idAudioHardwareALSA::Write: %d frames overflowed and dropped\n", m_remainingFrames ); } if( !flushing ) { // if running after the mix loop, then we have a full buffer to write out m_remainingFrames = MIXBUFFER_SAMPLES; } if( m_remainingFrames == 0 ) { return; } // write the max frames you can in one shot - we need to write it all out in Flush() calls before the next Write() happens int pos = ( int )m_buffer + ( MIXBUFFER_SAMPLES - m_remainingFrames ) * m_channels * 2; snd_pcm_sframes_t frames = id_snd_pcm_writei( m_pcm_handle, ( void* )pos, m_remainingFrames ); if( frames < 0 ) { if( frames != -EAGAIN ) { Sys_Printf( "snd_pcm_writei %d frames failed: %s\n", m_remainingFrames, id_snd_strerror( frames ) ); } return; } m_remainingFrames -= frames; }<|fim▁end|>
#ifdef _DEBUG
<|file_name|>svhn.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- import os from fuel import config from fuel.datasets import H5PYDataset from fuel.transformers.defaults import uint8_pixels_to_floatX class SVHN(H5PYDataset): """The Street View House Numbers (SVHN) dataset. SVHN [SVHN] is a real-world image dataset for developing machine learning and object recognition algorithms with minimal requirement on data preprocessing and formatting. It can be seen as similar in flavor to MNIST [LBBH] (e.g., the images are of small cropped<|fim▁hole|> unsolved, real world problem (recognizing digits and numbers in natural scene images). SVHN is obtained from house numbers in Google Street View images. .. [SVHN] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Bo Wu, Andrew Y. Ng. *Reading Digits in Natural Images with Unsupervised Feature Learning*, NIPS Workshop on Deep Learning and Unsupervised Feature Learning, 2011. .. [LBBH] Yann LeCun, Léon Bottou, Yoshua Bengio, and Patrick Haffner, *Gradient-based learning applied to document recognition*, Proceedings of the IEEE, November 1998, 86(11):2278-2324. Parameters ---------- which_format : {1, 2} SVHN format 1 contains the full numbers, whereas SVHN format 2 contains cropped digits. which_set : {'train', 'test', 'extra'} Whether to load the training set (73,257 examples), the test set (26,032 examples) or the extra set (531,131 examples). Note that SVHN does not have a validation set; usually you will create your own training/validation split using the `subset` argument. """ filename = 'svhn_format_{}.hdf5' default_transformers = uint8_pixels_to_floatX(('features',)) def __init__(self, which_format, which_set, **kwargs): self.which_format = which_format super(SVHN, self).__init__(self.data_path, which_set, **kwargs) @property def data_path(self): return os.path.join( config.data_path, self.filename.format(self.which_format))<|fim▁end|>
digits), but incorporates an order of magnitude more labeled data (over 600,000 digit images) and comes from a significantly harder,
<|file_name|>projects_tags.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from django.core.urlresolvers import reverse from projects.models import ProjectBuild register = Library() @register.simple_tag() def build_url(build_id): """ Fetches the ProjectBuild for a given build_id, if any. """ try: build = ProjectBuild.objects.get(build_id=build_id) return reverse( "project_projectbuild_detail", kwargs={"project_pk": build.project.pk, "build_pk": build.pk}) except ProjectBuild.DoesNotExist: return ""<|fim▁end|>
from django.template.base import Library
<|file_name|>exit-normally.tsx<|end_file_name|><|fim▁begin|>import React from 'react';<|fim▁hole|><|fim▁end|>
import {Text, render} from '../..'; const {waitUntilExit} = render(<Text>Hello World</Text>); waitUntilExit().then(() => console.log('exited'));
<|file_name|>pyrits.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2011, De Verkeersonderneming <[email protected]> # # This file is part of PyRITS - A tool for processing and analyzing transport # management system data. # # PyRITS is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PyRITS is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """This is the executable for PyRITS. To tun this script, run it from the command line. The command line interface for PyRITS gives usage information. Run this script with the -h option to display help information. PyRITS has four submodules: preprocess, drivetimes, delays and report. The usage information for each submodule can be viewed by running the command, ./pyrits.py <module> -h """ import sys import os import logging import argparse import psycopg2 import pyrits.config import pyrits.erniesoft.std import pyrits.erniesoft.report __author__ = "Serrano Pereira" __copyright__ = "Copyright 2011, De Verkeersonderneming" __credits__ = ["Serrano Pereira <[email protected]>"] __license__ = "GPL3" __version__ = "0.1.2" __maintainer__ = "Serrano Pereira" __email__ = "[email protected]" __status__ = "Production" __date__ = "2011/11/24" def get_connection(db): """Return a PostgreSQL database connection object.""" conn_string = pyrits.config.cfg.get('conn_string', database=db) try: connection = psycopg2.connect(conn_string) return connection except: # Get the most recent exception exceptionType, exceptionValue, exceptionTraceback = sys.exc_info() # Exit the script and print an error telling what happened. sys.exit("Database connection failed!\n %s" % (exceptionValue)) def main(): # Set logging level. logging.basicConfig(level=logging.INFO, format='%(levelname)s %(message)s') # Read configurations from the config file. pyrits.config.cfg.read_configuration() # Create main argument parser. parser = argparse.ArgumentParser(description='Please specify a task.') parser.add_argument('--version', action='version', help="Print version information.", version="PyRITS "+__version__) # Create a sub parser for sub-commands. subparsers = parser.add_subparsers(help='Specify which task to start.') help_preprocess = "Perform preprocesses on the database. This must be run once after new data was added to the database." help_drivetimes = "Calculate driving times." help_delays = "Calculate driving delays." help_report = "Generate a report." # Create an argument parser for sub-command 'preprocess'. parser_preprocess = subparsers.add_parser('preprocess', help=help_preprocess, description=help_preprocess) parser_preprocess.add_argument('-d, --database', action='store', type=str, choices=['erniesoft','tans'], required=True, help="Specify database name. Possible values: erniesoft, tans.", metavar="DB", dest='database') # Create an argument parser for sub-command 'drivetimes'. 
parser_drivetimes = subparsers.add_parser('drivetimes', help=help_drivetimes, description=help_drivetimes) parser_drivetimes.add_argument('-d, --database', action='store', type=str, choices=['erniesoft','tans'], required=True, help="Specify database name. Possible values: erniesoft, tans.", metavar="DB", dest='database') parser_drivetimes.add_argument('-s, --date-start', action='store', type=str, required=False, help="Begin date of the records to analyze.", metavar="YYYY-MM-DD", dest='date_start') parser_drivetimes.add_argument('-e, --date-end', action='store', type=str, required=False, help="End date of the records to analyze.", metavar="YYYY-MM-DD", dest='date_end') parser_drivetimes.add_argument('-o', action='store', type=str, required=False, help="Specify output folder. If specified, results will be saved to this folder.", metavar="PATH", dest='output_folder') parser_drivetimes.add_argument('-v, --vehicle', action='store', type=str, required=False, help="Specify the vehicle to be analyzed. If not specified, all vehicles are analyzed.", metavar="CODE", dest='vehicle_code') parser_drivetimes.add_argument('-r, --ride', action='store', type=int, required=False, help="Specify ride number. If specified, only this ride is analyzed.", metavar="N", dest='ride_number') # Create an argument parser for sub-command 'delays'. parser_delays = subparsers.add_parser('delays', help=help_delays, description=help_delays) parser_delays.add_argument('-d, --database', action='store', type=str, choices=['erniesoft','tans'], required=True, help="Specify database name. Possible values: erniesoft, tans.", metavar="DB", dest='database') # Create an argument parser for sub-command 'report'. parser_report = subparsers.add_parser('report', help=help_report, description=help_report) parser_report.add_argument('-d, --database', action='store', type=str, choices=['erniesoft','tans'], required=True, help="Specify database name. Possible values: erniesoft, tans.", metavar="DB", dest='database') parser_report.add_argument('-s, --date-start', action='store', type=str, required=True, help="Specify start date.", metavar="YYYY-MM-DD", dest='date_start') parser_report.add_argument('-e, --date-end', action='store', type=str, required=True, help="Specify end date.", metavar="YYYY-MM-DD", dest='date_end') parser_report.add_argument('-o', action='store', type=str, required=True, help="Specify output file.", metavar="FILE", dest='output_file') parser_report.add_argument('-t, --type', action='store', default='xml', type=str, choices=['xml','csv-tasks','csv-stats','routes'], required=False,<|fim▁hole|> dest='output_format') parser_report.add_argument('--zip-depth', action='store', type=int, default=10, choices=[4,5,6,7,8,9,10], required=False, help="Zip code depth for grouping routes in reports. Default is 10.", metavar="N", dest='zip_depth') parser_report.add_argument('--top-max', action='store', type=int, default=5, required=False, help="The maximum number of items in a top list. Default is 5.", metavar="N", dest='top_max') parser_report.add_argument('--filter-countries', action='store', type=str, default=None, required=False, help="Used for the 'routes' report. Filter routes for specific countries. Multiple countries must be separated by comma's (e.g. nl,de).", metavar="CODES", dest='filter_countries') parser_report.add_argument('--routes-filter-actions', action='store', type=str, default=None, required=False, help="Used for the 'routes' report. Filter routes for specific action codes. 
Multiple action codes must be separated by comma's (e.g. laden,lossen).", metavar="ACTIONS", dest='routes_filter_actions') parser_report.add_argument('--routes-method', action='store', type=int, default=1, choices=[1,2], required=False, help="Used for the 'routes' report. Set the method for defining routes. Possible values: 1 for location combinations (default), 2 for location permutations.", metavar="N", dest='routes_method') parser_report.add_argument('--routes-no-replacement', action='store_const', const=True, required=False, help="Used for the 'routes' report. Skip routes with the same start and end location.", dest='routes_no_replacement') # Parse the arguments. args = parser.parse_args() # Decide which tool to start. if sys.argv[1] == 'drivetimes': drivetimes(args) elif sys.argv[1] == 'delays': delays(args) elif sys.argv[1] == 'report': report(args) elif sys.argv[1] == 'preprocess': preprocess(args) # Terminate the application. sys.exit() def drivetimes(args): """Calculate the realized driving times.""" # Do some extra checks. if args.output_folder and not os.path.exists(args.output_folder): sys.exit("Path '%s' does not exists." % args.output_folder) if args.database == "erniesoft": # Make a connection with the database. connection = get_connection(args.database) # Create instance of driving times calculator. driving_times = pyrits.erniesoft.std.DrivingTimes(connection) # Calculate for ride number. if args.ride_number: exit_status = driving_times.calculate_ride(args.ride_number) # Save results to database. #if exit_status == 0: driving_times.save_to_database() # Write results to output file and display results in browser. if exit_status == 0 and args.output_folder: output_file = "drivetimes_ride_%s.html" % args.ride_number output_file = os.path.join(args.output_folder, output_file) driving_times.output_html(output_file, open_file=1) # Calculate for a single vehicle. elif args.vehicle_code: exit_status = driving_times.calculate_vehicle(args.vehicle_code, args.date_start, args.date_end) # Save results to database. #if exit_status == 0: driving_times.save_to_database() # Write results to output file and display results in browser. if exit_status == 0 and args.output_folder: if not args.date_start: output_file = "drivetimes_%s.html" % (args.vehicle_code) else: output_file = "drivetimes_%s_%s_%s.html" % (args.date_start, args.date_end, args.vehicle_code) output_file = os.path.join(args.output_folder, output_file) driving_times.output_html(output_file, open_file=1) # Calculate for all vehicles. else: # Obtain all vehicle codes that belong to this date range. logging.info("Obtaining vehicle codes...") vehicle_codes = driving_times.get_vehicles_from_date_range(args.date_start, args.date_end) vehicle_codes.sort() if len(vehicle_codes) == 0: logging.info("No vehicles found. Nothing to do.") # Remove existing driving times from the database. logging.info("Purging driving times table...") driving_times.purge() # Process data. for vehicle_code in vehicle_codes: exit_status = driving_times.calculate_vehicle(vehicle_code, args.date_start, args.date_end) # Save results to database. if exit_status == 0: driving_times.save_to_database() # Write results to output file and display results in browser. 
if exit_status == 0 and args.output_folder: if not args.date_start: output_file = "drivetimes_%s.html" % (vehicle_code) else: output_file = "drivetimes_%s_%s_%s.html" % (args.date_start, args.date_end, vehicle_code) output_file = os.path.join(args.output_folder, output_file) driving_times.output_html(output_file, open_file=0) # Close database connection. connection.close() logging.info("Done") elif args.database == "tans": sys.exit("Not yet implemented.") def delays(args): """Calculate the planned and realized driving delays.""" if args.database == "erniesoft": # Make a connection with the database. connection = get_connection(args.database) # Create instance of driving times calculator. driving_delays = pyrits.erniesoft.std.DrivingDelays(connection) # Calculate delays (this first sets the clean driving times # and the planned delays, then the realized delays). driving_delays.calculate_delays_using_modelit() # Close database connection. connection.close() logging.info("Done") def delays_(args): """Calculate the planned and realized driving delays. .. note:: This function shows the old method of calculating planned and realized delays. This function has been replaced by :meth:`delays` which uses Modelit data. This function is no longer in use and is kept here for reference. """ if args.database == "erniesoft": # Make a connection with the database. connection = get_connection(args.database) # Create instance of driving times calculator. driving_delays = pyrits.erniesoft.std.DrivingDelays(connection) # Calculate delays for a single vehicle. if args.vehicle_code: # Calculate realized delays. driving_delays.calculate_realized_delays(args.vehicle_code) # Planned delays must be calculated after the realized delays. driving_delays.calculate_planned_delays() # Calculate delays for all vehicles. else: # Obtain all vehicle codes. logging.info("Obtaining vehicle codes...") vehicle_codes = driving_delays.get_vehicles_from_date_range() vehicle_codes.sort() if len(vehicle_codes) == 0: logging.info("No vehicles found. Nothing to do.") # Calculate the realized delays for each vehicle. for vehicle_code in vehicle_codes: driving_delays.calculate_realized_delays(vehicle_code) # Planned delays must be calculated after the realized delays. driving_delays.calculate_planned_delays() # Close database connection. connection.close() logging.info("Done") elif args.database == "tans": sys.exit("Not yet implemented.") def report(args): """Generate a report of the driving times and driving delays.""" # Perform extra checks. if not os.path.dirname(args.output_file): sys.exit("Path '%s' does not exists." % args.output_file) if args.zip_depth: pyrits.config.cfg.set('zip-code-depth', args.zip_depth) if args.top_max: pyrits.config.cfg.set('report-top-list-max', args.top_max) if args.filter_countries: args.filter_countries = args.filter_countries.split(',') if args.routes_filter_actions: args.routes_filter_actions = args.routes_filter_actions.split(',') if args.routes_no_replacement: routes_replacement = False else: routes_replacement = True # Start calculations. if args.database == "erniesoft": # Make a connection with the database. connection = get_connection(args.database) # Export the report. 
if args.output_format == 'xml': report = pyrits.erniesoft.report.XMLReport(connection) report.make(args.date_start, args.date_end) report.export(args.output_file) elif args.output_format == 'csv-tasks': report = pyrits.erniesoft.report.CSVReport(connection) report.export_driving_times(args.output_file, args.date_start, args.date_end) elif args.output_format == 'csv-stats': report = pyrits.erniesoft.report.CSVReport(connection) report.export_statistics(args.output_file, args.date_start, args.date_end) elif args.output_format == 'routes': report = pyrits.erniesoft.report.CSVReport(connection) report.set_route_frequencies(args.routes_method, args.filter_countries, args.routes_filter_actions, routes_replacement) report.export_route_frequencies(args.output_file) # Close database connection. connection.close() elif args.database == "tans": sys.exit("Not yet implemented.") def preprocess(args): """Preprocess the database. Preprocesses must be performed on each database before doing any other task. - For the Erniesoft database, this means defining the vehicle code and route for each task. """ if args.database == "erniesoft": # Make a connection with the database. connection = get_connection(args.database) # Process data. preprocessor = pyrits.erniesoft.std.Preprocess(connection) preprocessor.start() # Close database connection. connection.close() elif args.database == "tans": sys.exit("Not yet implemented.") if __name__ == "__main__": main()<|fim▁end|>
help="Specify output format for the report. Possibe values: xml, csv-tasks, csv-stats, routes. Default is xml.", metavar="TYPE",
<|file_name|>constructorReturnsInvalidType.js<|end_file_name|><|fim▁begin|>//// [constructorReturnsInvalidType.ts] class X { constructor() { return 1; } foo() { } } var x = new X(); //// [constructorReturnsInvalidType.js] var X = (function () { function X() { return 1; } X.prototype.foo = function () { }; return X; })(); <|fim▁hole|>var x = new X();<|fim▁end|>
<|file_name|>Demo1.java<|end_file_name|><|fim▁begin|>package com.asksunny.batch.tasklets; public class Demo1 { long id; String name; public Demo1() { } public long getId() { return id; } public void setId(long id) { this.id = id; } public String getName() { return name; } <|fim▁hole|> }<|fim▁end|>
public void setName(String name) { this.name = name; }
<|file_name|>DNAFlatBuilding.py<|end_file_name|><|fim▁begin|>from panda3d.core import NodePath, DecalEffect import DNANode import DNAWall import random class DNAFlatBuilding(DNANode.DNANode): COMPONENT_CODE = 9 currentWallHeight = 0 def __init__(self, name): DNANode.DNANode.__init__(self, name) self.width = 0 self.hasDoor = False def setWidth(self, width): self.width = width <|fim▁hole|> def setCurrentWallHeight(self, currentWallHeight): DNAFlatBuilding.currentWallHeight = currentWallHeight def getCurrentWallHeight(self): return DNAFlatBuilding.currentWallHeight def setHasDoor(self, hasDoor): self.hasDoor = hasDoor def getHasDoor(self): return self.hasDoor def makeFromDGI(self, dgi): DNANode.DNANode.makeFromDGI(self, dgi) self.width = dgi.getInt16() / 100.0 self.hasDoor = dgi.getBool() def setupSuitFlatBuilding(self, nodePath, dnaStorage): name = self.getName() if name[:2] != 'tb': return name = 'sb' + name[2:] node = nodePath.attachNewNode(name) node.setPosHpr(self.getPos(), self.getHpr()) numCodes = dnaStorage.getNumCatalogCodes('suit_wall') if numCodes < 1: return code = dnaStorage.getCatalogCode( 'suit_wall', random.randint(0, numCodes - 1)) wallNode = dnaStorage.findNode(code) if not wallNode: return wallNode = wallNode.copyTo(node, 0) wallScale = wallNode.getScale() wallScale.setX(self.width) wallScale.setZ(DNAFlatBuilding.currentWallHeight) wallNode.setScale(wallScale) if self.getHasDoor(): wallNodePath = node.find('wall_*') doorNode = dnaStorage.findNode('suit_door') doorNode = doorNode.copyTo(wallNodePath, 0) doorNode.setScale(NodePath(), (1, 1, 1)) doorNode.setPosHpr(0.5, 0, 0, 0, 0, 0) wallNodePath.setEffect(DecalEffect.make()) node.flattenMedium() node.stash() def setupCogdoFlatBuilding(self, nodePath, dnaStorage): name = self.getName() if name[:2] != 'tb': return name = 'cb' + name[2:] node = nodePath.attachNewNode(name) node.setPosHpr(self.getPos(), self.getHpr()) numCodes = dnaStorage.getNumCatalogCodes('cogdo_wall') if numCodes < 1: return code = dnaStorage.getCatalogCode( 'cogdo_wall', random.randint(0, numCodes - 1)) wallNode = dnaStorage.findNode(code) if not wallNode: return wallNode = wallNode.copyTo(node, 0) wallScale = wallNode.getScale() wallScale.setX(self.width) wallScale.setZ(DNAFlatBuilding.currentWallHeight) wallNode.setScale(wallScale) if self.getHasDoor(): wallNodePath = node.find('wall_*') doorNode = dnaStorage.findNode('suit_door') doorNode = doorNode.copyTo(wallNodePath, 0) doorNode.setScale(NodePath(), (1, 1, 1)) doorNode.setPosHpr(0.5, 0, 0, 0, 0, 0) wallNodePath.setEffect(DecalEffect.make()) node.flattenMedium() node.stash() def traverse(self, nodePath, dnaStorage): DNAFlatBuilding.currentWallHeight = 0 node = nodePath.attachNewNode(self.getName()) internalNode = node.attachNewNode(self.getName() + '-internal') scale = self.getScale() scale.setX(self.width) internalNode.setScale(scale) node.setPosHpr(self.getPos(), self.getHpr()) for child in self.children: if isinstance(child, DNAWall.DNAWall): child.traverse(internalNode, dnaStorage) else: child.traverse(node, dnaStorage) if DNAFlatBuilding.currentWallHeight == 0: print 'empty flat building with no walls' else: cameraBarrier = dnaStorage.findNode('wall_camera_barrier') if cameraBarrier is None: raise DNAError.DNAError('DNAFlatBuilding requires that there is a wall_camera_barrier in storage') cameraBarrier = cameraBarrier.copyTo(internalNode, 0) cameraBarrier.setScale((1, 1, DNAFlatBuilding.currentWallHeight)) internalNode.flattenStrong() collisionNode = node.find('**/door_*/+CollisionNode') if not 
collisionNode.isEmpty(): collisionNode.setName('KnockKnockDoorSphere_' + dnaStorage.getBlock(self.getName())) cameraBarrier.wrtReparentTo(nodePath, 0) wallCollection = internalNode.findAllMatches('wall*') wallHolder = node.attachNewNode('wall_holder') wallDecal = node.attachNewNode('wall_decal') windowCollection = internalNode.findAllMatches('**/window*') doorCollection = internalNode.findAllMatches('**/door*') corniceCollection = internalNode.findAllMatches('**/cornice*_d') wallCollection.reparentTo(wallHolder) windowCollection.reparentTo(wallDecal) doorCollection.reparentTo(wallDecal) corniceCollection.reparentTo(wallDecal) for i in xrange(wallHolder.getNumChildren()): iNode = wallHolder.getChild(i) iNode.clearTag('DNACode') iNode.clearTag('DNARoot') wallHolder.flattenStrong() wallDecal.flattenStrong() holderChild0 = wallHolder.getChild(0) wallDecal.getChildren().reparentTo(holderChild0) holderChild0.reparentTo(internalNode) holderChild0.setEffect(DecalEffect.make()) wallHolder.removeNode() wallDecal.removeNode() self.setupSuitFlatBuilding(nodePath, dnaStorage) self.setupCogdoFlatBuilding(nodePath, dnaStorage) node.flattenStrong()<|fim▁end|>
def getWidth(self): return self.width
<|file_name|>table_core.js<|end_file_name|><|fim▁begin|>/* * mobile table unit tests */<|fim▁hole|>(function($){ module( "Basic Table", { setup: function(){ var hash = "#basic-table-test"; if( location.hash != hash ){ stop(); $(document).one("pagechange", function() { start(); }); $.mobile.changePage( hash ); } }, teardown: function() { } }); asyncTest( "The page should be enhanced correctly" , function(){ setTimeout(function() { var $table = $('#basic-table-test .ui-table'); ok( $table.length, ".ui-table class added to table element" ); start(); }, 800); }); asyncTest( "Has data object attributed to table" , function(){ setTimeout(function(){ var $table = $('#basic-table-test .ui-table'), self = $table.data( "mobile-table" ); ok( self , "Data object is available" ); start(); }, 800); }); asyncTest( "Has headers option" , function(){ setTimeout(function() { var $table = $('#basic-table-test .ui-table'), self = $table.data( "mobile-table" ); ok( self.headers.length , "Header array is not empty"); equal( 5 , self.headers.length , "Number of headers is correct"); start(); }, 800); }); module( "Reflow Mode", { setup: function(){ var hash = "#reflow-table-test"; if( location.hash != hash ){ stop(); $(document).one("pagechange", function() { start(); }); $.mobile.changePage( hash ); } }, teardown: function() { } }); asyncTest( "The page should be enhanced correctly" , function(){ setTimeout(function() { ok($('#reflow-table-test .ui-table-reflow').length, ".ui-table-reflow class added to table element"); start(); }, 800); }); asyncTest( "The appropriate label is added" , function(){ setTimeout(function(){ var $table = $( "#reflow-table-test table" ), $body = $table.find( "tbody" ), $tds = $body.find( "td" ), labels = $tds.find( "b.ui-table-cell-label" ); ok( labels , "Appropriate label placed" ); equal( $( labels[0] ).text(), "Movie Title" , "Appropriate label placed" ); start(); }, 800); }); module( "Column toggle table Mode", { setup: function(){ var hash = "#column-table-test"; if( location.hash != hash ){ stop(); $(document).one("pagechange", function() { start(); }); $.mobile.changePage( hash ); } }, teardown: function() { } }); asyncTest( "The page should be enhanced correctly" , function(){ setTimeout(function() { var $popup = $('#column-table-test #movie-table-column-popup-popup'); ok($('#column-table-test .ui-table-columntoggle').length, ".ui-table-columntoggle class added to table element"); ok($('#column-table-test .ui-table-columntoggle-btn').length, ".ui-table-columntoggle-btn button added"); equal($('#column-table-test .ui-table-columntoggle-btn').text(), "Columns...", "Column toggle button has correct text"); ok( $popup.length, "dialog added" ); ok( $popup.is( ".ui-popup-hidden" ) , "dialog hidden"); ok($('#column-table-test #movie-table-column-popup-popup').find( "input[type=checkbox]" ).length > 0 , "Checkboxes added"); start(); }, 800); }); asyncTest( "The dialog should become visible when button is clicked" , function(){ expect( 2 ); var $input; $.testHelper.pageSequence([ function() { $( ".ui-table-columntoggle-btn" ).click(); }, function() { setTimeout(function() { ok( $( "#movie-table-column-popup-popup" ).not( ".ui-popup-hidden" ) , "Table popup is shown on click" ); }, 800); }, function() { $input = $( ".ui-popup-container" ).find( "input:first" ); $input.click(); }, function(){ setTimeout(function(){ var headers = $( "#column-table-test table tr" ).find( "th:first" ); if( $input.is( ":checked" ) ){ ok( headers.not( ".ui-table-cell-hidden" ) ); } else { ok( headers.is( 
".ui-table-cell-hidden" ) ); } }, 800); }, function() { start(); } ]); }); })(jQuery);<|fim▁end|>
<|file_name|>testbinding.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ // check-tidy: no specs after this line use dom::bindings::codegen::Bindings::EventListenerBinding::EventListener; use dom::bindings::codegen::Bindings::FunctionBinding::Function; use dom::bindings::codegen::Bindings::TestBindingBinding::{self, TestBindingMethods, TestEnum}; use dom::bindings::codegen::UnionTypes::{BlobOrBoolean, BlobOrBlobSequence}; use dom::bindings::codegen::UnionTypes::{BlobOrString, BlobOrUnsignedLong, EventOrString}; use dom::bindings::codegen::UnionTypes::{EventOrUSVString, HTMLElementOrLong}; use dom::bindings::codegen::UnionTypes::{HTMLElementOrUnsignedLongOrStringOrBoolean, LongSequenceOrBoolean}; use dom::bindings::codegen::UnionTypes::{StringOrLongSequence, StringOrStringSequence, StringSequenceOrUnsignedLong}; use dom::bindings::codegen::UnionTypes::{StringOrUnsignedLong, StringOrBoolean, UnsignedLongOrBoolean}; use dom::bindings::error::Fallible; use dom::bindings::global::GlobalRef; use dom::bindings::js::Root; use dom::bindings::num::Finite; use dom::bindings::reflector::{Reflectable, Reflector, reflect_dom_object}; use dom::bindings::str::{ByteString, USVString}; use dom::bindings::weakref::MutableWeakRef; use dom::blob::Blob; use dom::url::URL; use js::jsapi::{HandleValue, JSContext, JSObject}; use js::jsval::{JSVal, NullValue}; use std::borrow::ToOwned; use std::ptr; use std::rc::Rc; use util::str::DOMString; #[dom_struct] pub struct TestBinding { reflector_: Reflector, url: MutableWeakRef<URL>, } impl TestBinding { fn new_inherited() -> TestBinding { TestBinding { reflector_: Reflector::new(), url: MutableWeakRef::new(None), } } pub fn new(global: GlobalRef) -> Root<TestBinding> { reflect_dom_object(box TestBinding::new_inherited(), global, TestBindingBinding::Wrap) } pub fn Constructor(global: GlobalRef) -> Fallible<Root<TestBinding>> { Ok(TestBinding::new(global)) } #[allow(unused_variables)] pub fn Constructor_(global: GlobalRef, nums: Vec<f64>) -> Fallible<Root<TestBinding>> { Ok(TestBinding::new(global)) } #[allow(unused_variables)] pub fn Constructor__(global: GlobalRef, num: f64) -> Fallible<Root<TestBinding>> { Ok(TestBinding::new(global)) } } impl TestBindingMethods for TestBinding { fn BooleanAttribute(&self) -> bool { false } fn SetBooleanAttribute(&self, _: bool) {} fn ByteAttribute(&self) -> i8 { 0 } fn SetByteAttribute(&self, _: i8) {} fn OctetAttribute(&self) -> u8 { 0 } fn SetOctetAttribute(&self, _: u8) {} fn ShortAttribute(&self) -> i16 { 0 } fn SetShortAttribute(&self, _: i16) {} fn UnsignedShortAttribute(&self) -> u16 { 0 } fn SetUnsignedShortAttribute(&self, _: u16) {} fn LongAttribute(&self) -> i32 { 0 } fn SetLongAttribute(&self, _: i32) {} fn UnsignedLongAttribute(&self) -> u32 { 0 } fn SetUnsignedLongAttribute(&self, _: u32) {} fn LongLongAttribute(&self) -> i64 { 0 } fn SetLongLongAttribute(&self, _: i64) {} fn UnsignedLongLongAttribute(&self) -> u64 { 0 } fn SetUnsignedLongLongAttribute(&self, _: u64) {} fn UnrestrictedFloatAttribute(&self) -> f32 { 0. } fn SetUnrestrictedFloatAttribute(&self, _: f32) {} fn FloatAttribute(&self) -> Finite<f32> { Finite::wrap(0.) } fn SetFloatAttribute(&self, _: Finite<f32>) {} fn UnrestrictedDoubleAttribute(&self) -> f64 { 0. } fn SetUnrestrictedDoubleAttribute(&self, _: f64) {} fn DoubleAttribute(&self) -> Finite<f64> { Finite::wrap(0.) 
} fn SetDoubleAttribute(&self, _: Finite<f64>) {} fn StringAttribute(&self) -> DOMString { DOMString::new() } fn SetStringAttribute(&self, _: DOMString) {} fn UsvstringAttribute(&self) -> USVString { USVString("".to_owned()) } fn SetUsvstringAttribute(&self, _: USVString) {} fn ByteStringAttribute(&self) -> ByteString { ByteString::new(vec!()) } fn SetByteStringAttribute(&self, _: ByteString) {} fn EnumAttribute(&self) -> TestEnum { TestEnum::_empty } fn SetEnumAttribute(&self, _: TestEnum) {} fn InterfaceAttribute(&self) -> Root<Blob> { Blob::new(self.global().r(), Vec::new(), "") } fn SetInterfaceAttribute(&self, _: &Blob) {} fn UnionAttribute(&self) -> HTMLElementOrLong { HTMLElementOrLong::Long(0) } fn SetUnionAttribute(&self, _: HTMLElementOrLong) {} fn Union2Attribute(&self) -> EventOrString { EventOrString::String(DOMString::new()) } fn SetUnion2Attribute(&self, _: EventOrString) {} fn Union3Attribute(&self) -> EventOrUSVString { EventOrUSVString::USVString(USVString("".to_owned())) } fn SetUnion3Attribute(&self, _: EventOrUSVString) {} fn Union4Attribute(&self) -> StringOrUnsignedLong { StringOrUnsignedLong::UnsignedLong(0u32) } fn SetUnion4Attribute(&self, _: StringOrUnsignedLong) {} fn Union5Attribute(&self) -> StringOrBoolean { StringOrBoolean::Boolean(true) } fn SetUnion5Attribute(&self, _: StringOrBoolean) {} fn Union6Attribute(&self) -> UnsignedLongOrBoolean { UnsignedLongOrBoolean::Boolean(true) } fn SetUnion6Attribute(&self, _: UnsignedLongOrBoolean) {} fn Union7Attribute(&self) -> BlobOrBoolean { BlobOrBoolean::Boolean(true) } fn SetUnion7Attribute(&self, _: BlobOrBoolean) {} fn Union8Attribute(&self) -> BlobOrUnsignedLong { BlobOrUnsignedLong::UnsignedLong(0u32) } fn SetUnion8Attribute(&self, _: BlobOrUnsignedLong) {} fn ArrayAttribute(&self, _: *mut JSContext) -> *mut JSObject { NullValue().to_object_or_null() } fn AnyAttribute(&self, _: *mut JSContext) -> JSVal { NullValue() } fn SetAnyAttribute(&self, _: *mut JSContext, _: HandleValue) {} fn ObjectAttribute(&self, _: *mut JSContext) -> *mut JSObject { panic!() } fn SetObjectAttribute(&self, _: *mut JSContext, _: *mut JSObject) {} fn GetBooleanAttributeNullable(&self) -> Option<bool> { Some(false) } fn SetBooleanAttributeNullable(&self, _: Option<bool>) {} fn GetByteAttributeNullable(&self) -> Option<i8> { Some(0) } fn SetByteAttributeNullable(&self, _: Option<i8>) {} fn GetOctetAttributeNullable(&self) -> Option<u8> { Some(0) } fn SetOctetAttributeNullable(&self, _: Option<u8>) {} fn GetShortAttributeNullable(&self) -> Option<i16> { Some(0) } fn SetShortAttributeNullable(&self, _: Option<i16>) {} fn GetUnsignedShortAttributeNullable(&self) -> Option<u16> { Some(0) } fn SetUnsignedShortAttributeNullable(&self, _: Option<u16>) {} fn GetLongAttributeNullable(&self) -> Option<i32> { Some(0) } fn SetLongAttributeNullable(&self, _: Option<i32>) {} fn GetUnsignedLongAttributeNullable(&self) -> Option<u32> { Some(0) } fn SetUnsignedLongAttributeNullable(&self, _: Option<u32>) {} fn GetLongLongAttributeNullable(&self) -> Option<i64> { Some(0) } fn SetLongLongAttributeNullable(&self, _: Option<i64>) {} fn GetUnsignedLongLongAttributeNullable(&self) -> Option<u64> { Some(0) } fn SetUnsignedLongLongAttributeNullable(&self, _: Option<u64>) {} fn GetUnrestrictedFloatAttributeNullable(&self) -> Option<f32> { Some(0.) 
} fn SetUnrestrictedFloatAttributeNullable(&self, _: Option<f32>) {} fn GetFloatAttributeNullable(&self) -> Option<Finite<f32>> { Some(Finite::wrap(0.)) } fn SetFloatAttributeNullable(&self, _: Option<Finite<f32>>) {} fn GetUnrestrictedDoubleAttributeNullable(&self) -> Option<f64> { Some(0.) } fn SetUnrestrictedDoubleAttributeNullable(&self, _: Option<f64>) {} fn GetDoubleAttributeNullable(&self) -> Option<Finite<f64>> { Some(Finite::wrap(0.)) } fn SetDoubleAttributeNullable(&self, _: Option<Finite<f64>>) {} fn GetByteStringAttributeNullable(&self) -> Option<ByteString> { Some(ByteString::new(vec!())) } fn SetByteStringAttributeNullable(&self, _: Option<ByteString>) {} fn GetStringAttributeNullable(&self) -> Option<DOMString> { Some(DOMString::new()) } fn SetStringAttributeNullable(&self, _: Option<DOMString>) {} fn GetUsvstringAttributeNullable(&self) -> Option<USVString> { Some(USVString("".to_owned())) } fn SetUsvstringAttributeNullable(&self, _: Option<USVString>) {} fn SetBinaryRenamedAttribute(&self, _: DOMString) {} fn ForwardedAttribute(&self) -> Root<TestBinding> { Root::from_ref(self) } fn BinaryRenamedAttribute(&self) -> DOMString { DOMString::new() } fn SetBinaryRenamedAttribute2(&self, _: DOMString) {} fn BinaryRenamedAttribute2(&self) -> DOMString { DOMString::new() } fn Attr_to_automatically_rename(&self) -> DOMString { DOMString::new() } fn SetAttr_to_automatically_rename(&self, _: DOMString) {} fn GetEnumAttributeNullable(&self) -> Option<TestEnum> { Some(TestEnum::_empty) }<|fim▁hole|> Some(Blob::new(self.global().r(), Vec::new(), "")) } fn SetInterfaceAttributeNullable(&self, _: Option<&Blob>) {} fn GetInterfaceAttributeWeak(&self) -> Option<Root<URL>> { self.url.root() } fn SetInterfaceAttributeWeak(&self, url: Option<&URL>) { self.url.set(url); } fn GetObjectAttributeNullable(&self, _: *mut JSContext) -> *mut JSObject { ptr::null_mut() } fn SetObjectAttributeNullable(&self, _: *mut JSContext, _: *mut JSObject) {} fn GetUnionAttributeNullable(&self) -> Option<HTMLElementOrLong> { Some(HTMLElementOrLong::Long(0)) } fn SetUnionAttributeNullable(&self, _: Option<HTMLElementOrLong>) {} fn GetUnion2AttributeNullable(&self) -> Option<EventOrString> { Some(EventOrString::String(DOMString::new())) } fn SetUnion2AttributeNullable(&self, _: Option<EventOrString>) {} fn GetUnion3AttributeNullable(&self) -> Option<BlobOrBoolean> { Some(BlobOrBoolean::Boolean(true)) } fn SetUnion3AttributeNullable(&self, _: Option<BlobOrBoolean>) {} fn GetUnion4AttributeNullable(&self) -> Option<UnsignedLongOrBoolean> { Some(UnsignedLongOrBoolean::Boolean(true)) } fn SetUnion4AttributeNullable(&self, _: Option<UnsignedLongOrBoolean>) {} fn GetUnion5AttributeNullable(&self) -> Option<StringOrBoolean> { Some(StringOrBoolean::Boolean(true)) } fn SetUnion5AttributeNullable(&self, _: Option<StringOrBoolean>) {} fn BinaryRenamedMethod(&self) -> () {} fn ReceiveVoid(&self) -> () {} fn ReceiveBoolean(&self) -> bool { false } fn ReceiveByte(&self) -> i8 { 0 } fn ReceiveOctet(&self) -> u8 { 0 } fn ReceiveShort(&self) -> i16 { 0 } fn ReceiveUnsignedShort(&self) -> u16 { 0 } fn ReceiveLong(&self) -> i32 { 0 } fn ReceiveUnsignedLong(&self) -> u32 { 0 } fn ReceiveLongLong(&self) -> i64 { 0 } fn ReceiveUnsignedLongLong(&self) -> u64 { 0 } fn ReceiveUnrestrictedFloat(&self) -> f32 { 0. } fn ReceiveFloat(&self) -> Finite<f32> { Finite::wrap(0.) } fn ReceiveUnrestrictedDouble(&self) -> f64 { 0. } fn ReceiveDouble(&self) -> Finite<f64> { Finite::wrap(0.) 
} fn ReceiveString(&self) -> DOMString { DOMString::new() } fn ReceiveUsvstring(&self) -> USVString { USVString("".to_owned()) } fn ReceiveByteString(&self) -> ByteString { ByteString::new(vec!()) } fn ReceiveEnum(&self) -> TestEnum { TestEnum::_empty } fn ReceiveInterface(&self) -> Root<Blob> { Blob::new(self.global().r(), Vec::new(), "") } fn ReceiveAny(&self, _: *mut JSContext) -> JSVal { NullValue() } fn ReceiveObject(&self, _: *mut JSContext) -> *mut JSObject { panic!() } fn ReceiveUnion(&self) -> HTMLElementOrLong { HTMLElementOrLong::Long(0) } fn ReceiveUnion2(&self) -> EventOrString { EventOrString::String(DOMString::new()) } fn ReceiveUnion3(&self) -> StringOrLongSequence { StringOrLongSequence::LongSequence(vec![]) } fn ReceiveUnion4(&self) -> StringOrStringSequence { StringOrStringSequence::StringSequence(vec![]) } fn ReceiveUnion5(&self) -> BlobOrBlobSequence { BlobOrBlobSequence::BlobSequence(vec![]) } fn ReceiveUnion6(&self) -> StringOrUnsignedLong { StringOrUnsignedLong::String(DOMString::new()) } fn ReceiveUnion7(&self) -> StringOrBoolean { StringOrBoolean::Boolean(true) } fn ReceiveUnion8(&self) -> UnsignedLongOrBoolean { UnsignedLongOrBoolean::UnsignedLong(0u32) } fn ReceiveUnion9(&self) -> HTMLElementOrUnsignedLongOrStringOrBoolean { HTMLElementOrUnsignedLongOrStringOrBoolean::Boolean(true) } fn ReceiveSequence(&self) -> Vec<i32> { vec![1] } fn ReceiveInterfaceSequence(&self) -> Vec<Root<Blob>> { vec![Blob::new(self.global().r(), Vec::new(), "")] } fn ReceiveNullableBoolean(&self) -> Option<bool> { Some(false) } fn ReceiveNullableByte(&self) -> Option<i8> { Some(0) } fn ReceiveNullableOctet(&self) -> Option<u8> { Some(0) } fn ReceiveNullableShort(&self) -> Option<i16> { Some(0) } fn ReceiveNullableUnsignedShort(&self) -> Option<u16> { Some(0) } fn ReceiveNullableLong(&self) -> Option<i32> { Some(0) } fn ReceiveNullableUnsignedLong(&self) -> Option<u32> { Some(0) } fn ReceiveNullableLongLong(&self) -> Option<i64> { Some(0) } fn ReceiveNullableUnsignedLongLong(&self) -> Option<u64> { Some(0) } fn ReceiveNullableUnrestrictedFloat(&self) -> Option<f32> { Some(0.) } fn ReceiveNullableFloat(&self) -> Option<Finite<f32>> { Some(Finite::wrap(0.)) } fn ReceiveNullableUnrestrictedDouble(&self) -> Option<f64> { Some(0.) 
} fn ReceiveNullableDouble(&self) -> Option<Finite<f64>> { Some(Finite::wrap(0.)) } fn ReceiveNullableString(&self) -> Option<DOMString> { Some(DOMString::new()) } fn ReceiveNullableUsvstring(&self) -> Option<USVString> { Some(USVString("".to_owned())) } fn ReceiveNullableByteString(&self) -> Option<ByteString> { Some(ByteString::new(vec!())) } fn ReceiveNullableEnum(&self) -> Option<TestEnum> { Some(TestEnum::_empty) } fn ReceiveNullableInterface(&self) -> Option<Root<Blob>> { Some(Blob::new(self.global().r(), Vec::new(), "")) } fn ReceiveNullableObject(&self, _: *mut JSContext) -> *mut JSObject { ptr::null_mut() } fn ReceiveNullableUnion(&self) -> Option<HTMLElementOrLong> { Some(HTMLElementOrLong::Long(0)) } fn ReceiveNullableUnion2(&self) -> Option<EventOrString> { Some(EventOrString::String(DOMString::new())) } fn ReceiveNullableUnion3(&self) -> Option<StringOrLongSequence> { Some(StringOrLongSequence::String(DOMString::new())) } fn ReceiveNullableUnion4(&self) -> Option<LongSequenceOrBoolean> { Some(LongSequenceOrBoolean::Boolean(true)) } fn ReceiveNullableUnion5(&self) -> Option<UnsignedLongOrBoolean> { Some(UnsignedLongOrBoolean::UnsignedLong(0u32)) } fn ReceiveNullableSequence(&self) -> Option<Vec<i32>> { Some(vec![1]) } fn PassBoolean(&self, _: bool) {} fn PassByte(&self, _: i8) {} fn PassOctet(&self, _: u8) {} fn PassShort(&self, _: i16) {} fn PassUnsignedShort(&self, _: u16) {} fn PassLong(&self, _: i32) {} fn PassUnsignedLong(&self, _: u32) {} fn PassLongLong(&self, _: i64) {} fn PassUnsignedLongLong(&self, _: u64) {} fn PassUnrestrictedFloat(&self, _: f32) {} fn PassFloat(&self, _: Finite<f32>) {} fn PassUnrestrictedDouble(&self, _: f64) {} fn PassDouble(&self, _: Finite<f64>) {} fn PassString(&self, _: DOMString) {} fn PassUsvstring(&self, _: USVString) {} fn PassByteString(&self, _: ByteString) {} fn PassEnum(&self, _: TestEnum) {} fn PassInterface(&self, _: &Blob) {} fn PassUnion(&self, _: HTMLElementOrLong) {} fn PassUnion2(&self, _: EventOrString) {} fn PassUnion3(&self, _: BlobOrString) {} fn PassUnion4(&self, _: StringOrStringSequence) {} fn PassUnion5(&self, _: StringOrBoolean) {} fn PassUnion6(&self, _: UnsignedLongOrBoolean) {} fn PassUnion7(&self, _: StringSequenceOrUnsignedLong) {} fn PassAny(&self, _: *mut JSContext, _: HandleValue) {} fn PassObject(&self, _: *mut JSContext, _: *mut JSObject) {} fn PassCallbackFunction(&self, _: Rc<Function>) {} fn PassCallbackInterface(&self, _: Rc<EventListener>) {} fn PassSequence(&self, _: Vec<i32>) {} fn PassStringSequence(&self, _: Vec<DOMString>) {} fn PassInterfaceSequence(&self, _: Vec<Root<Blob>>) {} fn PassNullableBoolean(&self, _: Option<bool>) {} fn PassNullableByte(&self, _: Option<i8>) {} fn PassNullableOctet(&self, _: Option<u8>) {} fn PassNullableShort(&self, _: Option<i16>) {} fn PassNullableUnsignedShort(&self, _: Option<u16>) {} fn PassNullableLong(&self, _: Option<i32>) {} fn PassNullableUnsignedLong(&self, _: Option<u32>) {} fn PassNullableLongLong(&self, _: Option<i64>) {} fn PassNullableUnsignedLongLong(&self, _: Option<u64>) {} fn PassNullableUnrestrictedFloat(&self, _: Option<f32>) {} fn PassNullableFloat(&self, _: Option<Finite<f32>>) {} fn PassNullableUnrestrictedDouble(&self, _: Option<f64>) {} fn PassNullableDouble(&self, _: Option<Finite<f64>>) {} fn PassNullableString(&self, _: Option<DOMString>) {} fn PassNullableUsvstring(&self, _: Option<USVString>) {} fn PassNullableByteString(&self, _: Option<ByteString>) {} // fn PassNullableEnum(self, _: Option<TestEnum>) {} fn PassNullableInterface(&self, 
_: Option<&Blob>) {} fn PassNullableObject(&self, _: *mut JSContext, _: *mut JSObject) {} fn PassNullableUnion(&self, _: Option<HTMLElementOrLong>) {} fn PassNullableUnion2(&self, _: Option<EventOrString>) {} fn PassNullableUnion3(&self, _: Option<StringOrLongSequence>) {} fn PassNullableUnion4(&self, _: Option<LongSequenceOrBoolean>) {} fn PassNullableUnion5(&self, _: Option<UnsignedLongOrBoolean>) {} fn PassNullableCallbackFunction(&self, _: Option<Rc<Function>>) {} fn PassNullableCallbackInterface(&self, _: Option<Rc<EventListener>>) {} fn PassNullableSequence(&self, _: Option<Vec<i32>>) {} fn PassOptionalBoolean(&self, _: Option<bool>) {} fn PassOptionalByte(&self, _: Option<i8>) {} fn PassOptionalOctet(&self, _: Option<u8>) {} fn PassOptionalShort(&self, _: Option<i16>) {} fn PassOptionalUnsignedShort(&self, _: Option<u16>) {} fn PassOptionalLong(&self, _: Option<i32>) {} fn PassOptionalUnsignedLong(&self, _: Option<u32>) {} fn PassOptionalLongLong(&self, _: Option<i64>) {} fn PassOptionalUnsignedLongLong(&self, _: Option<u64>) {} fn PassOptionalUnrestrictedFloat(&self, _: Option<f32>) {} fn PassOptionalFloat(&self, _: Option<Finite<f32>>) {} fn PassOptionalUnrestrictedDouble(&self, _: Option<f64>) {} fn PassOptionalDouble(&self, _: Option<Finite<f64>>) {} fn PassOptionalString(&self, _: Option<DOMString>) {} fn PassOptionalUsvstring(&self, _: Option<USVString>) {} fn PassOptionalByteString(&self, _: Option<ByteString>) {} fn PassOptionalEnum(&self, _: Option<TestEnum>) {} fn PassOptionalInterface(&self, _: Option<&Blob>) {} fn PassOptionalUnion(&self, _: Option<HTMLElementOrLong>) {} fn PassOptionalUnion2(&self, _: Option<EventOrString>) {} fn PassOptionalUnion3(&self, _: Option<StringOrLongSequence>) {} fn PassOptionalUnion4(&self, _: Option<LongSequenceOrBoolean>) {} fn PassOptionalUnion5(&self, _: Option<UnsignedLongOrBoolean>) {} fn PassOptionalAny(&self, _: *mut JSContext, _: HandleValue) {} fn PassOptionalObject(&self, _: *mut JSContext, _: Option<*mut JSObject>) {} fn PassOptionalCallbackFunction(&self, _: Option<Rc<Function>>) {} fn PassOptionalCallbackInterface(&self, _: Option<Rc<EventListener>>) {} fn PassOptionalSequence(&self, _: Option<Vec<i32>>) {} fn PassOptionalNullableBoolean(&self, _: Option<Option<bool>>) {} fn PassOptionalNullableByte(&self, _: Option<Option<i8>>) {} fn PassOptionalNullableOctet(&self, _: Option<Option<u8>>) {} fn PassOptionalNullableShort(&self, _: Option<Option<i16>>) {} fn PassOptionalNullableUnsignedShort(&self, _: Option<Option<u16>>) {} fn PassOptionalNullableLong(&self, _: Option<Option<i32>>) {} fn PassOptionalNullableUnsignedLong(&self, _: Option<Option<u32>>) {} fn PassOptionalNullableLongLong(&self, _: Option<Option<i64>>) {} fn PassOptionalNullableUnsignedLongLong(&self, _: Option<Option<u64>>) {} fn PassOptionalNullableUnrestrictedFloat(&self, _: Option<Option<f32>>) {} fn PassOptionalNullableFloat(&self, _: Option<Option<Finite<f32>>>) {} fn PassOptionalNullableUnrestrictedDouble(&self, _: Option<Option<f64>>) {} fn PassOptionalNullableDouble(&self, _: Option<Option<Finite<f64>>>) {} fn PassOptionalNullableString(&self, _: Option<Option<DOMString>>) {} fn PassOptionalNullableUsvstring(&self, _: Option<Option<USVString>>) {} fn PassOptionalNullableByteString(&self, _: Option<Option<ByteString>>) {} // fn PassOptionalNullableEnum(self, _: Option<Option<TestEnum>>) {} fn PassOptionalNullableInterface(&self, _: Option<Option<&Blob>>) {} fn PassOptionalNullableObject(&self, _: *mut JSContext, _: Option<*mut JSObject>) {} fn 
PassOptionalNullableUnion(&self, _: Option<Option<HTMLElementOrLong>>) {} fn PassOptionalNullableUnion2(&self, _: Option<Option<EventOrString>>) {} fn PassOptionalNullableUnion3(&self, _: Option<Option<StringOrLongSequence>>) {} fn PassOptionalNullableUnion4(&self, _: Option<Option<LongSequenceOrBoolean>>) {} fn PassOptionalNullableUnion5(&self, _: Option<Option<UnsignedLongOrBoolean>>) {} fn PassOptionalNullableCallbackFunction(&self, _: Option<Option<Rc<Function>>>) {} fn PassOptionalNullableCallbackInterface(&self, _: Option<Option<Rc<EventListener>>>) {} fn PassOptionalNullableSequence(&self, _: Option<Option<Vec<i32>>>) {} fn PassOptionalBooleanWithDefault(&self, _: bool) {} fn PassOptionalByteWithDefault(&self, _: i8) {} fn PassOptionalOctetWithDefault(&self, _: u8) {} fn PassOptionalShortWithDefault(&self, _: i16) {} fn PassOptionalUnsignedShortWithDefault(&self, _: u16) {} fn PassOptionalLongWithDefault(&self, _: i32) {} fn PassOptionalUnsignedLongWithDefault(&self, _: u32) {} fn PassOptionalLongLongWithDefault(&self, _: i64) {} fn PassOptionalUnsignedLongLongWithDefault(&self, _: u64) {} fn PassOptionalStringWithDefault(&self, _: DOMString) {} fn PassOptionalUsvstringWithDefault(&self, _: USVString) {} fn PassOptionalEnumWithDefault(&self, _: TestEnum) {} fn PassOptionalNullableBooleanWithDefault(&self, _: Option<bool>) {} fn PassOptionalNullableByteWithDefault(&self, _: Option<i8>) {} fn PassOptionalNullableOctetWithDefault(&self, _: Option<u8>) {} fn PassOptionalNullableShortWithDefault(&self, _: Option<i16>) {} fn PassOptionalNullableUnsignedShortWithDefault(&self, _: Option<u16>) {} fn PassOptionalNullableLongWithDefault(&self, _: Option<i32>) {} fn PassOptionalNullableUnsignedLongWithDefault(&self, _: Option<u32>) {} fn PassOptionalNullableLongLongWithDefault(&self, _: Option<i64>) {} fn PassOptionalNullableUnsignedLongLongWithDefault(&self, _: Option<u64>) {} // fn PassOptionalNullableUnrestrictedFloatWithDefault(self, _: Option<f32>) {} // fn PassOptionalNullableFloatWithDefault(self, _: Option<Finite<f32>>) {} // fn PassOptionalNullableUnrestrictedDoubleWithDefault(self, _: Option<f64>) {} // fn PassOptionalNullableDoubleWithDefault(self, _: Option<Finite<f64>>) {} fn PassOptionalNullableStringWithDefault(&self, _: Option<DOMString>) {} fn PassOptionalNullableUsvstringWithDefault(&self, _: Option<USVString>) {} fn PassOptionalNullableByteStringWithDefault(&self, _: Option<ByteString>) {} // fn PassOptionalNullableEnumWithDefault(self, _: Option<TestEnum>) {} fn PassOptionalNullableInterfaceWithDefault(&self, _: Option<&Blob>) {} fn PassOptionalNullableObjectWithDefault(&self, _: *mut JSContext, _: *mut JSObject) {} fn PassOptionalNullableUnionWithDefault(&self, _: Option<HTMLElementOrLong>) {} fn PassOptionalNullableUnion2WithDefault(&self, _: Option<EventOrString>) {} // fn PassOptionalNullableCallbackFunctionWithDefault(self, _: Option<Function>) {} fn PassOptionalNullableCallbackInterfaceWithDefault(&self, _: Option<Rc<EventListener>>) {} fn PassOptionalAnyWithDefault(&self, _: *mut JSContext, _: HandleValue) {} fn PassOptionalNullableBooleanWithNonNullDefault(&self, _: Option<bool>) {} fn PassOptionalNullableByteWithNonNullDefault(&self, _: Option<i8>) {} fn PassOptionalNullableOctetWithNonNullDefault(&self, _: Option<u8>) {} fn PassOptionalNullableShortWithNonNullDefault(&self, _: Option<i16>) {} fn PassOptionalNullableUnsignedShortWithNonNullDefault(&self, _: Option<u16>) {} fn PassOptionalNullableLongWithNonNullDefault(&self, _: Option<i32>) {} fn 
PassOptionalNullableUnsignedLongWithNonNullDefault(&self, _: Option<u32>) {} fn PassOptionalNullableLongLongWithNonNullDefault(&self, _: Option<i64>) {} fn PassOptionalNullableUnsignedLongLongWithNonNullDefault(&self, _: Option<u64>) {} // fn PassOptionalNullableUnrestrictedFloatWithNonNullDefault(self, _: Option<f32>) {} // fn PassOptionalNullableFloatWithNonNullDefault(self, _: Option<Finite<f32>>) {} // fn PassOptionalNullableUnrestrictedDoubleWithNonNullDefault(self, _: Option<f64>) {} // fn PassOptionalNullableDoubleWithNonNullDefault(self, _: Option<Finite<f64>>) {} fn PassOptionalNullableStringWithNonNullDefault(&self, _: Option<DOMString>) {} fn PassOptionalNullableUsvstringWithNonNullDefault(&self, _: Option<USVString>) {} // fn PassOptionalNullableEnumWithNonNullDefault(self, _: Option<TestEnum>) {} fn PassVariadicBoolean(&self, _: Vec<bool>) {} fn PassVariadicBooleanAndDefault(&self, _: bool, _: Vec<bool>) {} fn PassVariadicByte(&self, _: Vec<i8>) {} fn PassVariadicOctet(&self, _: Vec<u8>) {} fn PassVariadicShort(&self, _: Vec<i16>) {} fn PassVariadicUnsignedShort(&self, _: Vec<u16>) {} fn PassVariadicLong(&self, _: Vec<i32>) {} fn PassVariadicUnsignedLong(&self, _: Vec<u32>) {} fn PassVariadicLongLong(&self, _: Vec<i64>) {} fn PassVariadicUnsignedLongLong(&self, _: Vec<u64>) {} fn PassVariadicUnrestrictedFloat(&self, _: Vec<f32>) {} fn PassVariadicFloat(&self, _: Vec<Finite<f32>>) {} fn PassVariadicUnrestrictedDouble(&self, _: Vec<f64>) {} fn PassVariadicDouble(&self, _: Vec<Finite<f64>>) {} fn PassVariadicString(&self, _: Vec<DOMString>) {} fn PassVariadicUsvstring(&self, _: Vec<USVString>) {} fn PassVariadicByteString(&self, _: Vec<ByteString>) {} fn PassVariadicEnum(&self, _: Vec<TestEnum>) {} fn PassVariadicInterface(&self, _: &[&Blob]) {} fn PassVariadicUnion(&self, _: Vec<HTMLElementOrLong>) {} fn PassVariadicUnion2(&self, _: Vec<EventOrString>) {} fn PassVariadicUnion3(&self, _: Vec<BlobOrString>) {} fn PassVariadicUnion4(&self, _: Vec<BlobOrBoolean>) {} fn PassVariadicUnion5(&self, _: Vec<StringOrUnsignedLong>) {} fn PassVariadicUnion6(&self, _: Vec<UnsignedLongOrBoolean>) {} fn PassVariadicAny(&self, _: *mut JSContext, _: Vec<HandleValue>) {} fn PassVariadicObject(&self, _: *mut JSContext, _: Vec<*mut JSObject>) {} } impl TestBinding { pub fn BooleanAttributeStatic(_: GlobalRef) -> bool { false } pub fn SetBooleanAttributeStatic(_: GlobalRef, _: bool) {} pub fn ReceiveVoidStatic(_: GlobalRef) {} }<|fim▁end|>
fn GetInterfaceAttributeNullable(&self) -> Option<Root<Blob>> {
<|file_name|>kblink.directive.js<|end_file_name|><|fim▁begin|>(function() { 'use strict'; angular.module('facetApp') .directive('kblink', function() { return { restrict: 'EC', scope: { href: '@' }, transclude: true, controller: ['$scope', 'popoverService', function($scope, popoverService){ if (!$scope.href) return; $scope.image = false; $scope.lifespan = ''; popoverService.getHrefPopover($scope.href).then(function(data) { if (data.length) data = data[0]; $scope.label = data.label; $scope.link = '#!/henkilo/'+ (data.id).replace(/^.+?(p[0-9_]+)$/, '$1'); // check if lifespan contains any numbers if ((new RegExp(/\d/)).test(data.lifespan)) { // remove leading zeros (0800-0900) -> (800-900)<|fim▁hole|> $scope.lifespan = data.lifespan; } if (data.hasOwnProperty('image')) $scope.image = data.image; }); }], template: '<a uib-popover-template="\'views/tooltips/personTooltipTemplate.html\'" popover-trigger="\'mouseenter\'" ng-href="{{ link }}" ng-transclude></a>' }}); })();<|fim▁end|>
data.lifespan = data.lifespan.replace(/(\D)0/g, "$1");
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>from django.contrib import admin from .models import CustomUser, Equipment, Search, TagManagement<|fim▁hole|> admin.site.register(CustomUser) admin.site.register(Equipment) admin.site.register(Search) admin.site.register(Reserved) admin.site.register(Request) admin.site.register(Vote) admin.site.register(Log) admin.site.register(TagManagement) admin.site.register(Tag)<|fim▁end|>
from .models import Reserved, Request, Vote, Log, Tag
<|file_name|>aname2lname.rs<|end_file_name|><|fim▁begin|>extern crate krb5; fn main() { let ctx = match krb5::Context::new() { Ok(ctx) => ctx, Err(e) => panic!("Failure: {}", e) }; println!("Success, got context: {:?}", ctx);<|fim▁hole|> Err(e) => panic!("Failure: {}", e), }; println!("Success, translated name: {}", lname); }<|fim▁end|>
let lname = match ctx.localname("[email protected]") { Ok(lname) => lname,
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>//! Runners are responsible for executing a test suite's examples. mod configuration; mod observer; pub use runner::configuration::*; pub use runner::observer::*; use std::borrow::Borrow; use std::cell::Cell; use std::ops::{Deref, DerefMut}; use std::panic; #[cfg(not(test))] use std::process; use std::sync::{Arc, Mutex}; use time::PreciseTime; use rayon::prelude::*; use block::Block; use block::Suite; use block::Context; use block::Example; use report::{Report, BlockReport}; use report::ContextReport; use report::SuiteReport; use report::ExampleReport; use visitor::TestSuiteVisitor; /// Runner for executing a test suite's examples. pub struct Runner { pub configuration: configuration::Configuration, observers: Vec<Arc<RunnerObserver>>, should_exit: Mutex<Cell<bool>>, } impl Runner { pub fn new(configuration: Configuration, observers: Vec<Arc<RunnerObserver>>) -> Runner { Runner { configuration: configuration, observers: observers, should_exit: Mutex::new(Cell::new(false)), } } } impl Runner { pub fn run<T>(&self, suite: &Suite<T>) -> SuiteReport where T: Clone + Send + Sync + ::std::fmt::Debug, { let mut environment = suite.environment.clone(); self.prepare_before_run(); let report = self.visit(suite, &mut environment); self.clean_after_run(); if let Ok(mut mutex_guard) = self.should_exit.lock() { *mutex_guard.deref_mut().get_mut() |= report.is_failure(); } report } fn broadcast<F>(&self, mut handler: F) where F: FnMut(&RunnerObserver), { for observer in &self.observers { handler(observer.borrow()); } } fn wrap_all<T, U, F>(&self, context: &Context<T>, environment: &mut T, wrapped_block: F) -> U where F: Fn(&mut T) -> U, { for before_function in context.before_all.iter() { before_function(environment); } let result = wrapped_block(environment); for after_function in context.after_all.iter() { after_function(environment); } result } fn wrap_each<T, U, F>(&self, context: &Context<T>, environment: &mut T, wrapped_block: F) -> U where F: Fn(&mut T) -> U, { for before_function in context.before_each.iter() { before_function(environment); } let result = wrapped_block(environment); for after_function in context.after_each.iter() { after_function(environment); } result } fn evaluate_blocks_parallel<T>(&self, context: &Context<T>, environment: &T) -> Vec<BlockReport> where T: Clone + Send + Sync + ::std::fmt::Debug, { context .blocks .par_iter() .map(|block| self.evaluate_block(block, context, environment)) .collect() } fn evaluate_blocks_serial<T>(&self, context: &Context<T>, environment: &T) -> Vec<BlockReport> where T: Clone + Send + Sync + ::std::fmt::Debug, { context .blocks .iter() .map(|block| self.evaluate_block(block, context, environment)) .collect() } fn evaluate_block<T>( &self, block: &Block<T>, context: &Context<T>, environment: &T, ) -> BlockReport where T: Clone + Send + Sync + ::std::fmt::Debug, { let mut environment = environment.clone(); self.wrap_each(context, &mut environment, |environment| { self.visit(block, environment) }) } fn prepare_before_run(&self) { panic::set_hook(Box::new(|_panic_info| { // XXX panics already catched at the test call site, don't output the trace in stdout })); } fn clean_after_run(&self) { // XXX reset panic hook back to default hook: let _ = panic::take_hook(); } } #[cfg(test)] impl Default for Runner { /// Used for testing fn default() -> Self { Runner::new(Configuration::default(), vec!()) } } impl Drop for Runner { fn drop(&mut self) { let should_exit = if let Ok(mutex_guard) = 
self.should_exit.lock() { mutex_guard.deref().get() } else { false }; if self.configuration.exit_on_failure && should_exit { // XXX Cargo test failure returns 101. // // > "We use 101 as the standard failure exit code because it's something unique // > that the test runner can check for in run-fail tests (as opposed to something // > like 1, which everybody uses). I don't expect this behavior can ever change. // > This behavior probably dates to before 2013, // > all the way back to the creation of compiletest." – @brson #[cfg(not(test))] process::exit(101); #[cfg(test)] panic!("test suite failed !") } }<|fim▁hole|>} impl<T> TestSuiteVisitor<Suite<T>> for Runner where T: Clone + Send + Sync + ::std::fmt::Debug, { type Environment = T; type Output = SuiteReport; fn visit(&self, suite: &Suite<T>, environment: &mut Self::Environment) -> Self::Output { self.broadcast(|handler| handler.enter_suite(self, &suite.header)); let report = SuiteReport::new( suite.header.clone(), self.visit(&suite.context, environment), ); self.broadcast(|handler| handler.exit_suite(self, &suite.header, &report)); report } } impl<T> TestSuiteVisitor<Block<T>> for Runner where T: Clone + Send + Sync + ::std::fmt::Debug, { type Environment = T; type Output = BlockReport; fn visit(&self, member: &Block<T>, environment: &mut Self::Environment) -> Self::Output { match member { &Block::Example(ref example) => { let header = example.header.clone(); let report = self.visit(example, environment); BlockReport::Example(header, report) } &Block::Context(ref context) => { let header = context.header.clone(); let report = self.visit(context, &mut environment.clone()); BlockReport::Context(header, report) } } } } impl<T> TestSuiteVisitor<Context<T>> for Runner where T: Clone + Send + Sync + ::std::fmt::Debug, { type Environment = T; type Output = ContextReport; fn visit(&self, context: &Context<T>, environment: &mut Self::Environment) -> Self::Output { if let Some(ref header) = context.header { self.broadcast(|handler| handler.enter_context(self, &header)); } let start_time = PreciseTime::now(); let reports: Vec<_> = self.wrap_all(context, environment, |environment| if self.configuration .parallel { self.evaluate_blocks_parallel(context, environment) } else { self.evaluate_blocks_serial(context, environment) }); let end_time = PreciseTime::now(); let report = ContextReport::new(reports, start_time.to(end_time)); if let Some(ref header) = context.header { self.broadcast(|handler| handler.exit_context(self, &header, &report)); } report } } impl<T> TestSuiteVisitor<Example<T>> for Runner where T: Clone + Send + Sync + ::std::fmt::Debug, { type Environment = T; type Output = ExampleReport; fn visit(&self, example: &Example<T>, environment: &mut Self::Environment) -> Self::Output { self.broadcast(|handler| handler.enter_example(self, &example.header)); let start_time = PreciseTime::now(); let result = (example.function)(environment); let end_time = PreciseTime::now(); let report = ExampleReport::new(result, start_time.to(end_time)); self.broadcast(|handler| handler.exit_example(self, &example.header, &report)); report } } #[cfg(test)] mod tests { use super::*; mod runner { use super::*; #[test] fn it_can_be_instanciated() { // arrange let _ = Runner::new(Configuration::default(), vec!()); // act // assert } mod broadcast { use super::*; use header::*; use std::sync::atomic::*; // XXX blank impl for stubbing impl RunnerObserver for () {} #[test] fn it_calls_the_closure() { // arrange let spy = Arc::new(()); let runner = 
Runner::new(Configuration::default(), vec!(spy)); let has_been_called = AtomicBool::new(false); // act runner.broadcast(|_| has_been_called.store(true, Ordering::SeqCst)); // assert assert_eq!(true, has_been_called.load(Ordering::SeqCst)); } #[test] fn it_calls_it_once_per_observer() { // arrange let spy1 = Arc::new(()); let spy2 = Arc::new(()); let runner = Runner::new(Configuration::default(), vec![spy1, spy2]); let call_times = AtomicUsize::new(0); // act runner.broadcast(|_| { call_times.fetch_add(1, Ordering::SeqCst); }); // assert assert_eq!(2, call_times.load(Ordering::SeqCst)) } struct ObserverStub { events: Mutex<Vec<(&'static str, SuiteHeader)>>, } impl ObserverStub { fn new() -> Self { ObserverStub { events: Mutex::new(vec!()) } } } // XXX stub implem impl RunnerObserver for ObserverStub { fn enter_suite(&self, _runner: &Runner, header: &SuiteHeader) { let mut vec = self.events.lock().unwrap(); (*vec).push(("enter_suite", header.clone())); } } #[test] fn it_gives_the_observer_as_callback_argument() { // arrange let spy1 = Arc::new(ObserverStub::new()); let expected = SuiteHeader::new(SuiteLabel::Describe, "hello"); let runner = Runner::new(Configuration::default(), vec![spy1.clone()]); // act runner.broadcast(|observer| observer.enter_suite(&runner, &expected.clone())); // assert let lock = spy1.events.lock().expect("no dangling threads"); let res = (*lock).get(0).expect("to have been called once"); assert_eq!(&("enter_suite", expected), res); } } mod wrap_each { use super::*; use std::sync::atomic::*; #[test] fn it_can_be_called() { // arrange let runner = Runner::default(); // act runner.wrap_each(&Context::default(), &mut (), |_| {}) // assert } #[test] fn it_calls_the_closure() { // arrange let runner = Runner::default(); let has_been_called = AtomicBool::new(false); // act runner.wrap_each(&Context::default(), &mut (), |_| has_been_called.store(true, Ordering::SeqCst)); // assert assert_eq!(true, has_been_called.load(Ordering::SeqCst)); } #[test] fn it_calls_the_before_each_callbacks() { // arrange let runner = Runner::default(); let has_been_called = Arc::new(AtomicBool::new(false)); let closure_bool_handler = has_been_called.clone(); let mut context = Context::default(); // act context.before_each(move |_| closure_bool_handler.store(true, Ordering::SeqCst)); runner.wrap_each(&context, &mut (), |_| ()); // assert assert_eq!(true, has_been_called.load(Ordering::SeqCst)); } #[test] fn it_calls_the_after_each_callbacks() { // arrange let runner = Runner::default(); let has_been_called = Arc::new(AtomicBool::new(false)); let closure_bool_handler = has_been_called.clone(); let mut context = Context::default(); // act context.after_each(move |_| closure_bool_handler.store(true, Ordering::SeqCst)); runner.wrap_each(&context, &mut (), |_| ()); // assert assert_eq!(true, has_been_called.load(Ordering::SeqCst)); } #[test] fn it_calls_all_before_each_callbacks() { // arrange let runner = Runner::default(); let call_counter = Arc::new(AtomicUsize::new(0)); let closure_counter_handler1 = call_counter.clone(); let closure_counter_handler2 = call_counter.clone(); let mut context = Context::default(); // act context.before_each(move |_| { closure_counter_handler1.fetch_add(1, Ordering::SeqCst); }); context.before_each(move |_| { closure_counter_handler2.fetch_add(1, Ordering::SeqCst); }); runner.wrap_each(&context, &mut (), |_| ()); // assert assert_eq!(2, call_counter.load(Ordering::SeqCst)); } #[test] fn it_calls_all_after_each_callbacks() { // arrange let runner = Runner::default(); 
let call_counter = Arc::new(AtomicUsize::new(0)); let closure_counter_handler1 = call_counter.clone(); let closure_counter_handler2 = call_counter.clone(); let mut context = Context::default(); // act context.after_each(move |_| { closure_counter_handler1.fetch_add(1, Ordering::SeqCst); }); context.after_each(move |_| { closure_counter_handler2.fetch_add(1, Ordering::SeqCst); }); runner.wrap_each(&context, &mut (), |_| ()); // assert assert_eq!(2, call_counter.load(Ordering::SeqCst)); } #[test] fn it_calls_before_each_hook_before_the_main_closure() { // arrange let runner = Runner::default(); let last_caller_id = Arc::new(AtomicUsize::new(0)); let last_caller_handler1 = last_caller_id.clone(); let last_caller_handler2 = last_caller_id.clone(); let mut context = Context::default(); // act context.before_each(move |_| { last_caller_handler1.store(1, Ordering::SeqCst); }); runner.wrap_each(&context, &mut (), |_| { last_caller_handler2.store(2, Ordering::SeqCst); }); // assert assert_eq!(2, last_caller_id.load(Ordering::SeqCst)); } #[test] fn it_calls_after_each_hook_after_the_main_closure() { // arrange let runner = Runner::default(); let last_caller_id = Arc::new(AtomicUsize::new(0)); let last_caller_handler1 = last_caller_id.clone(); let last_caller_handler2 = last_caller_id.clone(); let mut context = Context::default(); // act context.after_each(move |_| { last_caller_handler1.store(1, Ordering::SeqCst); }); runner.wrap_each(&context, &mut (), |_| { last_caller_handler2.store(2, Ordering::SeqCst); }); // assert assert_eq!(1, last_caller_id.load(Ordering::SeqCst)); } } mod wrap_all { use super::*; use std::sync::atomic::*; #[test] fn it_can_be_called() { // arrange let runner = Runner::default(); // act runner.wrap_all(&Context::default(), &mut (), |_| {}) // assert } #[test] fn it_calls_the_closure() { // arrange let runner = Runner::default(); let has_been_called = AtomicBool::new(false); // act runner.wrap_all(&Context::default(), &mut (), |_| has_been_called.store(true, Ordering::SeqCst)); // assert assert_eq!(true, has_been_called.load(Ordering::SeqCst)); } #[test] fn it_calls_the_before_all_callbacks() { // arrange let runner = Runner::default(); let has_been_called = Arc::new(AtomicBool::new(false)); let closure_bool_handler = has_been_called.clone(); let mut context = Context::default(); // act context.before_all(move |_| closure_bool_handler.store(true, Ordering::SeqCst)); runner.wrap_all(&context, &mut (), |_| ()); // assert assert_eq!(true, has_been_called.load(Ordering::SeqCst)); } #[test] fn it_calls_the_after_all_callbacks() { // arrange let runner = Runner::default(); let has_been_called = Arc::new(AtomicBool::new(false)); let closure_bool_handler = has_been_called.clone(); let mut context = Context::default(); // act context.after_all(move |_| closure_bool_handler.store(true, Ordering::SeqCst)); runner.wrap_all(&context, &mut (), |_| ()); // assert assert_eq!(true, has_been_called.load(Ordering::SeqCst)); } #[test] fn it_calls_all_before_all_callbacks() { // arrange let runner = Runner::default(); let call_counter = Arc::new(AtomicUsize::new(0)); let closure_counter_handler1 = call_counter.clone(); let closure_counter_handler2 = call_counter.clone(); let mut context = Context::default(); // act context.before_all(move |_| { closure_counter_handler1.fetch_add(1, Ordering::SeqCst); }); context.before_all(move |_| { closure_counter_handler2.fetch_add(1, Ordering::SeqCst); }); runner.wrap_all(&context, &mut (), |_| ()); // assert assert_eq!(2, 
call_counter.load(Ordering::SeqCst)); } #[test] fn it_calls_all_after_all_callbacks() { // arrange let runner = Runner::default(); let call_counter = Arc::new(AtomicUsize::new(0)); let closure_counter_handler1 = call_counter.clone(); let closure_counter_handler2 = call_counter.clone(); let mut context = Context::default(); // act context.after_all(move |_| { closure_counter_handler1.fetch_add(1, Ordering::SeqCst); }); context.after_all(move |_| { closure_counter_handler2.fetch_add(1, Ordering::SeqCst); }); runner.wrap_all(&context, &mut (), |_| ()); // assert assert_eq!(2, call_counter.load(Ordering::SeqCst)); } #[test] fn it_calls_before_all_hook_before_the_main_closure() { // arrange let runner = Runner::default(); let last_caller_id = Arc::new(AtomicUsize::new(0)); let last_caller_handler1 = last_caller_id.clone(); let last_caller_handler2 = last_caller_id.clone(); let mut context = Context::default(); // act context.before_all(move |_| { last_caller_handler1.store(1, Ordering::SeqCst); }); runner.wrap_all(&context, &mut (), |_| { last_caller_handler2.store(2, Ordering::SeqCst); }); // assert assert_eq!(2, last_caller_id.load(Ordering::SeqCst)); } #[test] fn it_calls_after_all_hook_after_the_main_closure() { // arrange let runner = Runner::default(); let last_caller_id = Arc::new(AtomicUsize::new(0)); let last_caller_handler1 = last_caller_id.clone(); let last_caller_handler2 = last_caller_id.clone(); let mut context = Context::default(); // act context.after_all(move |_| { last_caller_handler1.store(1, Ordering::SeqCst); }); runner.wrap_all(&context, &mut (), |_| { last_caller_handler2.store(2, Ordering::SeqCst); }); // assert assert_eq!(1, last_caller_id.load(Ordering::SeqCst)); } } } mod impl_drop_for_runner { use super::*; #[test] #[should_panic] fn it_should_abort() { // arrange let config = ConfigurationBuilder::default() .exit_on_failure(true) .build() .unwrap(); // act { let runner = Runner::new(config, vec!()); (*runner.should_exit.lock().unwrap()).set(true); } // assert // test should panic } } mod impl_visitor_example_for_runner { use super::*; use header::*; use report::*; use std::sync::atomic::*; #[derive(Default, Debug, Clone)] struct SpyObserver { enter_example: Arc<AtomicBool>, exit_example: Arc<AtomicBool>, } impl RunnerObserver for SpyObserver { fn enter_example(&self, _runner: &Runner, _header: &ExampleHeader) { self.enter_example.store(true, Ordering::SeqCst) } fn exit_example(&self, _runner: &Runner, _header: &ExampleHeader, _report: &ExampleReport) { self.exit_example.store(true, Ordering::SeqCst) } } #[test] fn it_can_be_called() { // arrange let runner = Runner::default(); let example = Example::fixture_success(); // act // assert runner.visit(&example, &mut ()); } #[test] fn it_calls_observer_hooks() { // arrange let spy = Arc::new(SpyObserver::default()); let runner = Runner::new(Configuration::default(), vec![spy.clone()]); let example = Example::fixture_success(); // act runner.visit(&example, &mut ()); // assert assert!(true == spy.enter_example.load(Ordering::SeqCst)); assert!(true == spy.exit_example.load(Ordering::SeqCst)) } #[test] fn it_gives_an_env_to_the_example() { // arrange let runner = Runner::default(); let mut environment = Arc::new(AtomicBool::new(false)); // act let example = Example::new(ExampleHeader::default(), |env : &Arc<AtomicBool>| { env.store(true, Ordering::SeqCst); ExampleResult::Success }); runner.visit(&example, &mut environment); // assert assert_eq!(true, environment.load(Ordering::SeqCst)); } } mod impl_visitor_block_for_runner 
{ use super::*; #[test] fn it_can_be_called() { // arrange let runner = Runner::default(); let block = Block::Example(Example::fixture_success()); // act // assert runner.visit(&block, &mut ()); } } }<|fim▁end|>
<|file_name|>classifier.py<|end_file_name|><|fim▁begin|>import logging import os import cv2 import numpy as np import pickle from skimage.feature import hog from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.svm import LinearSVC logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG) logger = logging.getLogger(__name__) MODEL_CHECKPOINT = 'data/model.p' class Params(object): def __init__(self, color_space, hog_params, size, nbins, heatmap_threshold): self.color_space = color_space self.hog_params = hog_params self.nbins = nbins self.size = size self.heatmap_threshold = heatmap_threshold def spatial(img, size): return cv2.resize(img, size).ravel() def color_histogram(img, bins): ch = [] for i in range(img.shape[2]): ch.append(np.histogram(img[:, :, i], bins=bins)) return np.concatenate((ch[0][0], ch[1][0], ch[2][0])) def hog_features(img, params): output = [] for ch in range(img.shape[2]): feat = hog(img[:,:,ch], **params) output.append(feat) return output def extract_features(path, params): logger.debug('[extract_features] start...') features = [] for filename in path: img = cv2.imread(filename, cv2.IMREAD_COLOR) if params.color_space: feature_image = cv2.cvtColor(img, params.color_space) spatial_feat = spatial(feature_image, params.size) hist_feat = color_histogram(feature_image, params.nbins) hog_feat = np.ravel(hog_features(feature_image, params.hog_params)) features.append(np.concatenate((spatial_feat, hist_feat, hog_feat))) return features <|fim▁hole|>def train(car_features, non_car_features): logger.debug('[train] start') x = np.vstack((car_features, non_car_features)).astype(np.float64) scaler = StandardScaler().fit(x) scaled_x = scaler.transform(x) y = np.hstack((np.ones(len(car_features)), np.zeros(len(non_car_features)))) split_params = { 'test_size': 0.2, 'random_state': np.random.randint(0, 100) } x_train, x_test, y_train, y_test = train_test_split(scaled_x, y, **split_params) clf = LinearSVC() clf.fit(x_train, y_train) accuracy = clf.score(x_test, y_test) logger.debug('[train] accuracy = %s' % accuracy) return clf, scaler<|fim▁end|>
<|file_name|>getVariablesHandler.ts<|end_file_name|><|fim▁begin|>import helper from '../../helper'; import GetVariablesHandler from '../getVariablesHandler'; import Registry from '../../../registry'; /** Handler that takes care of getting all the variables for protractor. */ class ProtractorGetVariablesHandler extends GetVariablesHandler {<|fim▁hole|> helper.protractor.addSessionIfNonExisting(registry, ngApimockId); return registry.sessions[ngApimockId].variables; } } export default ProtractorGetVariablesHandler;<|fim▁end|>
/** @inheritDoc */ getVariables(registry: Registry, ngApimockId?: string): {} {
<|file_name|>params.go<|end_file_name|><|fim▁begin|>package dvb // Device delivery sytem type DeliverySystem uint32 const ( SysUndefined DeliverySystem = iota SysDVBCAnnexA SysDVBCAnnexB SysDVBT SysDSS SysDVBS SysDVBS2 SysDVBH SysISDBT SysISDBS SysISDBC SysATSC SysATSCMH SysDMBTH SysCMMB SysDAB SysDVBT2 SysTURBO SysDVBCAnnexC ) var dsn = []string{ "Undefined", "DVB-C Annex AC", "DVB-C Annex B", "DVB-T", "DSS", "DVB-S", "DVB-S2", "DVB-H", "ISDB-T", "ISDB-S", "ISDB-C", "ATSC", "ATSC-MH", "DMBT-H", "CMMB", "DAB", "DVB-T2", "TURBO", } func (ds DeliverySystem) String() string { if ds > DeliverySystem(len(dsn)) { return "unknown" } return dsn[ds] } type Inversion uint32 const ( InversionOff Inversion = iota InversionOn InversionAuto ) var inversionNames = []string{ "off", "on", "auto", } func (i Inversion) String() string { if i > InversionAuto { return "unknown" } return inversionNames[i] } type CodeRate uint32 const ( FECNone CodeRate = iota FEC12 FEC23 FEC34 FEC45 FEC56 FEC67 FEC78 FEC89 FECAuto FEC35 FEC910 ) var codeRateNames = []string{ "none", "1/2", "2/3", "3/4", "4/5", "5/6", "6/7", "7/8", "8/9", "auto", "3/5", "9/10", } func (cr CodeRate) String() string { if cr > FEC910 { return "unknown" } return codeRateNames[cr] } type Modulation uint32 const ( QPSK Modulation = iota QAM16 QAM32 QAM64 QAM128 QAM256 QAMAuto VSB8 VSB16 PSK8 APSK16 APSK32 DQPSK ) var modulationNames = []string{ "QPSK", "QAM16", "QAM32", "QAM64", "QAM128", "QAM256", "QAMAuto", "VSB8", "VSB16", "PSK8", "APSK16", "APSK32", "DQPSK", } func (m Modulation) String() string { if m > DQPSK { return "unknown" } return modulationNames[m] } type TxMode uint32 const ( TxMode2k TxMode = iota TxMode8k TxModeAuto TxMode4k TxMode1k TxMode16k TxMode32k ) var txModeNames = []string{ "2k", "8k", "auto", "4k", "1k", "16k", "32k", } func (tm TxMode) String() string { if tm > TxMode32k { return "unknown" } return txModeNames[tm] } type Guard uint32 const ( Guard32 Guard = iota // 1/32 Guard16 // 1/16 Guard8 // 1/8 Guard4 // 1/4 GuardAuto<|fim▁hole|> Guard128 // 1/128 GuardN128 // 19/128 GuardN256 // 19/256 ) var guardNames = []string{ "1/32", "1/16", "1/8", "1/4", "auto", "1/128", "19/128", "19/256", } func (gi Guard) String() string { if gi > GuardN256 { return "unknown" } return guardNames[gi] } type Hierarchy uint32 const ( HierarchyNone Hierarchy = iota Hierarchy1 Hierarchy2 Hierarchy4 HierarchyAuto ) var hierarchyNames = []string{ "none", "uniform", "HP/LP=2", "HP/LP=4", "auto", } func (h Hierarchy) String() string { if h > HierarchyAuto { return "unknown" } return hierarchyNames[h] } // DVB-S2 pilot type Pilot uint32 const ( PilotOn Pilot = iota PilotOff PilotAuto ) // DVB-S2 rolloff type Rolloff uint32 const ( Rolloff35 Rolloff = iota // Implied value in DVB-S, default for DVB-S2 Rolloff20 Rolloff25 RolloffAuto )<|fim▁end|>
<|file_name|>exp-ae-celeba-mafl-30.py<|end_file_name|><|fim▁begin|>import tensorflow as tf import os import sys from copy import copy from model.pipeline import Pipeline from tensorflow.python import debug as tf_debug if __name__ == "__main__":<|fim▁hole|> num_keypoints = 30 patch_feature_dim = 8 decoding_levels = 5 kp_transform_loss = 1e4 base_recon_weight = 0.1 recon_weight = Pipeline.ValueScheduler( "piecewise_constant", [100000, 200000], [base_recon_weight, base_recon_weight*100, base_recon_weight*1000] ) base_learning_rate=0.01 learning_rate = Pipeline.ValueScheduler( "piecewise_constant", [100000, 200000], [base_learning_rate, base_learning_rate*0.1, base_learning_rate*0.01] ) keypoint_separation_bandwidth=0.04 keypoint_separation_loss_weight = 10 opt = { "optimizer": "Adam", "data_name": "celeba_mafl_100x100_80x80", "recon_name": "gaussian_fixedvar_in_01", "encoder_name": "general_80x80", "decoder_name": "general_80x80", "latent_dim": num_keypoints*2+(num_keypoints+1)*patch_feature_dim, "train_color_jittering": True, "train_random_mirroring": False, "train_batch_size": 8, "train_shuffle_capacity": 1000, "learning_rate": learning_rate, "max_epochs": 2000, "weight_decay": 1e-6, "test_steps": 5000, "test_limit": 200, "recon_weight": recon_weight, } opt["encoder_options"] = { "keypoint_num": num_keypoints, "patch_feature_dim": patch_feature_dim, "ae_recon_type": opt["recon_name"], "keypoint_concentration_loss_weight": 100., "keypoint_axis_balancing_loss_weight": 200., "keypoint_separation_loss_weight": keypoint_separation_loss_weight, "keypoint_separation_bandwidth": keypoint_separation_bandwidth, "keypoint_transform_loss_weight": kp_transform_loss, "keypoint_decoding_heatmap_levels": decoding_levels, "keypoint_decoding_heatmap_level_base": 0.5**(1/2), "image_channels": 3, } opt["decoder_options"] = copy(opt["encoder_options"]) # ------------------------------------- model_dir = os.path.join("results/celeba_30") vp = Pipeline(None, opt, model_dir=model_dir) print(vp.opt) with vp.graph.as_default(): sess = vp.create_session() vp.run_full_train(sess, restore=True) vp.run_full_test(sess)<|fim▁end|>
<|file_name|>th.js<|end_file_name|><|fim▁begin|>// Node if (typeof module !== 'undefined' && module.exports) { var numeral = require('../../numeral'); var expect = require('chai').expect; var language = require('../../languages/th'); } describe('Language: th', function() { before(function() { numeral.language('th', language); numeral.language('th'); }); after(function() { numeral.reset(); }); describe('Number', function() { it('should format a number', function() { var tests = [ [10000,'0,0.0000','10,000.0000'], [10000.23,'0,0','10,000'], [-10000,'0,0.0','-10,000.0'], [10000.1234,'0.000','10000.123'], [-10000,'(0,0.0000)','(10,000.0000)'], [-0.23,'.00','-.23'], [-0.23,'(.00)','(.23)'], [0.23,'0.00000','0.23000'], [1230974,'0.0a','1.2ล้าน'], [1460,'0a','1พัน'], [-104000,'0a','-104พัน'], [1,'0o','1.'], [52,'0o','52.'], [23,'0o','23.'], [100,'0o','100.'], [1,'0[.]0','1'] ]; for (var i = 0; i < tests.length; i++) { expect(numeral(tests[i][0]).format(tests[i][1])).to.equal(tests[i][2]); } }); }); describe('Currency', function() { it('should format a currency', function() { var tests = [ [1000.234,'$0,0.00','฿1,000.23'], [-1000.234,'($0,0)','(฿1,000)'], [-1000.234,'$0.00','-฿1000.23'], [1230974,'($0.00a)','฿1.23ล้าน'] ]; for (var i = 0; i < tests.length; i++) { expect(numeral(tests[i][0]).format(tests[i][1])).to.equal(tests[i][2]); } }); }); describe('Percentages', function() { it('should format a percentages', function() { var tests = [ [1,'0%','100%'], [0.974878234,'0.000%','97.488%'], [-0.43,'0%','-43%'], [0.43,'(0.000%)','43.000%'] ]; for (var i = 0; i < tests.length; i++) { expect(numeral(tests[i][0]).format(tests[i][1])).to.equal(tests[i][2]);<|fim▁hole|> }); describe('Unformat', function() { it('should unformat', function() { var tests = [ ['10,000.123',10000.123], ['(0.12345)',-0.12345], ['(฿1.23ล้าน)',-1230000], ['10พัน',10000], ['-10พัน',-10000], ['23.',23], ['฿10,000.00',10000], ['-76%',-0.76], ['2:23:57',8637] ]; for (var i = 0; i < tests.length; i++) { expect(numeral().unformat(tests[i][0])).to.equal(tests[i][1]); } }); }); });<|fim▁end|>
} });
<|file_name|>_config.js<|end_file_name|><|fim▁begin|>export default { props: { things: [ { id: 1, name: 'a' }, { id: 2, name: 'b' }, { id: 3, name: 'c' }, { id: 4, name: 'd' }, { id: 5, name: 'e' } ] }, html: ` <div>a</div> <div>b</div> <div>c</div> <div>d</div> <div>e</div> `, test({ assert, component, raf }) { let divs = document.querySelectorAll('div'); divs.forEach(div => { div.getBoundingClientRect = function() { const index = [...this.parentNode.children].indexOf(this); const top = index * 30; return { left: 0, right: 100, top, bottom: top + 20 }; };<|fim▁hole|> { id: 5, name: 'e' }, { id: 2, name: 'b' }, { id: 3, name: 'c' }, { id: 4, name: 'd' }, { id: 1, name: 'a' } ]; divs = document.querySelectorAll('div'); assert.equal(divs[0].dy, 120); assert.equal(divs[4].dy, -120); raf.tick(50); assert.equal(divs[0].dy, 60); assert.equal(divs[4].dy, -60); raf.tick(100); assert.equal(divs[0].dy, 0); assert.equal(divs[4].dy, 0); } };<|fim▁end|>
}); component.things = [
<|file_name|>vaapi_picture_native_pixmap_angle.cc<|end_file_name|><|fim▁begin|>// Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "media/gpu/vaapi/vaapi_picture_native_pixmap_angle.h" #include "media/gpu/vaapi/va_surface.h" #include "media/gpu/vaapi/vaapi_wrapper.h" #include "ui/gfx/x/connection.h" #include "ui/gfx/x/future.h" #include "ui/gfx/x/xproto.h" #include "ui/gl/gl_bindings.h" #include "ui/gl/gl_image_egl_pixmap.h" #include "ui/gl/scoped_binders.h" namespace media { namespace { x11::Pixmap CreatePixmap(const gfx::Size& size) { auto* connection = x11::Connection::Get(); if (!connection->Ready()) return x11::Pixmap::None; auto root = connection->default_root(); uint8_t depth = 0; if (auto reply = connection->GetGeometry(root).Sync()) depth = reply->depth; else return x11::Pixmap::None; // TODO(tmathmeyer) should we use the depth from libva instead of root window? auto pixmap = connection->GenerateId<x11::Pixmap>(); uint16_t pixmap_width, pixmap_height; if (!base::CheckedNumeric<int>(size.width()).AssignIfValid(&pixmap_width) || !base::CheckedNumeric<int>(size.height()).AssignIfValid(&pixmap_height)) { return x11::Pixmap::None; } auto req = connection->CreatePixmap( {depth, pixmap, root, pixmap_width, pixmap_height}); if (req.Sync().error) pixmap = x11::Pixmap::None; return pixmap; } } // namespace VaapiPictureNativePixmapAngle::VaapiPictureNativePixmapAngle( scoped_refptr<VaapiWrapper> vaapi_wrapper, const MakeGLContextCurrentCallback& make_context_current_cb, const BindGLImageCallback& bind_image_cb, int32_t picture_buffer_id, const gfx::Size& size, const gfx::Size& visible_size, uint32_t service_texture_id, uint32_t client_texture_id, uint32_t texture_target) : VaapiPictureNativePixmap(std::move(vaapi_wrapper), make_context_current_cb, bind_image_cb, picture_buffer_id, size, visible_size, service_texture_id, client_texture_id, texture_target) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); // Check that they're both not 0 DCHECK(service_texture_id); DCHECK(client_texture_id); } VaapiPictureNativePixmapAngle::~VaapiPictureNativePixmapAngle() { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); if (gl_image_ && make_context_current_cb_.Run()) { gl_image_->ReleaseTexImage(texture_target_); DCHECK_EQ(glGetError(), static_cast<GLenum>(GL_NO_ERROR)); } if (x_pixmap_ != x11::Pixmap::None) x11::Connection::Get()->FreePixmap({x_pixmap_}); } Status VaapiPictureNativePixmapAngle::Allocate(gfx::BufferFormat format) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); if (!(texture_id_ || client_texture_id_)) return StatusCode::kVaapiNoTexture; if (!make_context_current_cb_ || !make_context_current_cb_.Run()) return StatusCode::kVaapiBadContext; auto image = base::MakeRefCounted<gl::GLImageEGLPixmap>(visible_size_, format); if (!image) return StatusCode::kVaapiNoImage; x_pixmap_ = CreatePixmap(visible_size_); if (x_pixmap_ == x11::Pixmap::None) return StatusCode::kVaapiNoPixmap; if (!image->Initialize(x_pixmap_)) return StatusCode::kVaapiFailedToInitializeImage; gl::ScopedTextureBinder texture_binder(texture_target_, texture_id_); if (!image->BindTexImage(texture_target_)) return StatusCode::kVaapiFailedToBindTexture; gl_image_ = image; DCHECK(bind_image_cb_); if (!bind_image_cb_.Run(client_texture_id_, texture_target_, gl_image_, /*can_bind_to_sampler=*/true)) { return StatusCode::kVaapiFailedToBindImage; } return OkStatus(); } bool 
VaapiPictureNativePixmapAngle::ImportGpuMemoryBufferHandle( gfx::BufferFormat format, gfx::GpuMemoryBufferHandle gpu_memory_buffer_handle) { NOTREACHED(); return false; } bool VaapiPictureNativePixmapAngle::DownloadFromSurface( scoped_refptr<VASurface> va_surface) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); if (!make_context_current_cb_ || !make_context_current_cb_.Run()) return false;<|fim▁hole|> gl::ScopedTextureBinder texture_binder(texture_target_, texture_id_); // GL needs to re-bind the texture after the pixmap content is updated so that // the compositor sees the updated contents (we found this out experimentally) gl_image_->ReleaseTexImage(texture_target_); DCHECK(gfx::Rect(va_surface->size()).Contains(gfx::Rect(visible_size_))); if (!vaapi_wrapper_->PutSurfaceIntoPixmap(va_surface->id(), x_pixmap_, visible_size_)) { return false; } return gl_image_->BindTexImage(texture_target_); } VASurfaceID VaapiPictureNativePixmapAngle::va_surface_id() const { return VA_INVALID_ID; } } // namespace media<|fim▁end|>
DCHECK(texture_id_);
<|file_name|>log_sink_test.go<|end_file_name|><|fim▁begin|>// Copyright 2015 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package log import ( "fmt" "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/adfin/statster/metrics/core" ) func TestSimpleWrite(t *testing.T) { now := time.Now() batch := core.DataBatch{ Timestamp: now, MetricSets: make(map[string]*core.MetricSet), } batch.MetricSets["pod1"] = &core.MetricSet{ Labels: map[string]string{"bzium": "hocuspocus"}, MetricValues: map[string]core.MetricValue{ "m1": { ValueType: core.ValueInt64, MetricType: core.MetricGauge, IntValue: 31415, }, }, LabeledMetrics: []core.LabeledMetric{ { Name: "lm", MetricValue: core.MetricValue{ MetricType: core.MetricGauge, ValueType: core.ValueInt64, IntValue: 279, }, Labels: map[string]string{ "disk": "hard", }, }, }, } log := batchToString(&batch) assert.True(t, strings.Contains(log, "31415")) assert.True(t, strings.Contains(log, "m1")) assert.True(t, strings.Contains(log, "bzium")) assert.True(t, strings.Contains(log, "hocuspocus")) assert.True(t, strings.Contains(log, "pod1")) assert.True(t, strings.Contains(log, "279"))<|fim▁hole|> assert.True(t, strings.Contains(log, "hard")) assert.True(t, strings.Contains(log, fmt.Sprintf("%s", now))) } func TestSortedOutput(t *testing.T) { const ( label1 = "abcLabel" label2 = "xyzLabel" pod1 = "pod1" pod2 = "pod2" metric1 = "metricA" metric2 = "metricB" ) metricVal := core.MetricValue{ ValueType: core.ValueInt64, MetricType: core.MetricGauge, IntValue: 31415, } metricSet := func(pod string) *core.MetricSet { return &core.MetricSet{ Labels: map[string]string{label2 + pod: pod, label1 + pod: pod}, MetricValues: map[string]core.MetricValue{ metric2 + pod: metricVal, metric1 + pod: metricVal, }, LabeledMetrics: []core.LabeledMetric{}, } } now := time.Now() batch := core.DataBatch{ Timestamp: now, MetricSets: map[string]*core.MetricSet{ pod2: metricSet(pod2), pod1: metricSet(pod1), }, } log := batchToString(&batch) sorted := []string{ pod1, label1 + pod1, label2 + pod1, metric1 + pod1, metric2 + pod1, pod2, label1 + pod2, label2 + pod2, metric1 + pod2, metric2 + pod2, } var ( previous string previousIndex int ) for _, metric := range sorted { metricIndex := strings.Index(log, metric) assert.NotEqual(t, -1, metricIndex, "%q not found", metric) if previous != "" { assert.True(t, previousIndex < metricIndex, "%q should be before %q", previous, metric) } previous = metric previousIndex = metricIndex } }<|fim▁end|>
assert.True(t, strings.Contains(log, "disk"))
<|file_name|>jquery.js<|end_file_name|><|fim▁begin|>$(function() { $('img[data-hover]').hover(function() { $(this).attr('tmp', $(this).attr('src')).attr('src', $(this).attr('data-hover')).attr('data-hover', $(this).attr('tmp')).removeAttr('tmp'); }).each(function() { $('<img>').attr('src', $(this).attr('data-hover')); });; }); jQuery(document).ready(function ($) { var options = { $AutoPlay: true, $SlideDuration: 800, $AutoPlayInterval: 2000 };<|fim▁hole|>});<|fim▁end|>
var jssor_slider1 = new $JssorSlider$('slider1_container', options);
<|file_name|>Hello.java<|end_file_name|><|fim▁begin|>/* * Copyright [2017] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at<|fim▁hole|> * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netpet.spools.book.insidethejavavirtualmachine.chapter18; /** * @Desc javap -verbose / javap -c Hello.class 查看字节码文件 * Created by woncz on 2017/8/18. */ public class Hello { }<|fim▁end|>
*
<|file_name|>iron.rs<|end_file_name|><|fim▁begin|>//! Exposes the `Iron` type, the main entrance point of the //! `Iron` library. use std::net::{ToSocketAddrs, SocketAddr}; use std::path::PathBuf; pub use hyper::server::Listening; use hyper::server::Server; use hyper::net::Fresh; use request::HttpRequest; use response::HttpResponse; use error::HttpResult; use {Request, Handler}; use status; /// The primary entrance point to `Iron`, a `struct` to instantiate a new server. /// /// `Iron` contains the `Handler` which takes a `Request` and produces a /// `Response`. pub struct Iron<H> { /// Iron contains a `Handler`, which it uses to create responses for client /// requests. pub handler: H, /// Once listening, the local address that this server is bound to. pub addr: Option<SocketAddr>, /// Once listening, the protocol used to serve content. pub protocol: Option<Protocol> } /// Protocol used to serve content. Future versions of Iron may add new protocols /// to this enum. Thus you should not exhaustively match on its variants. #[derive(Clone)] pub enum Protocol { /// Plaintext HTTP/1 Http, /// HTTP/1 over SSL/TLS Https { /// Path to SSL certificate file certificate: PathBuf, /// Path to SSL private key file key: PathBuf } } impl Protocol { /// Return the name used for this protocol in a URI's scheme part. pub fn name(&self) -> &'static str { match *self { Protocol::Http => "http", Protocol::Https { .. } => "https" } } } impl<H: Handler> Iron<H> { /// Kick off the server process using the HTTP protocol. /// /// Call this once to begin listening for requests on the server. /// This consumes the Iron instance, but does the listening on /// another task, so is not blocking. /// /// The thread returns a guard that will automatically join with the parent /// once it is dropped, blocking until this happens. /// /// Defaults to a threadpool of size `2 * num_cpus`. /// /// ## Panics /// /// Panics if the provided address does not parse. To avoid this /// call `to_socket_addrs` yourself and pass a parsed `SocketAddr`. pub fn http<A: ToSocketAddrs>(self, addr: A) -> HttpResult<Listening> { self.listen_with(addr, 2 * ::num_cpus::get(), Protocol::Http) } /// Kick off the server process using the HTTPS protocol. /// /// Call this once to begin listening for requests on the server. /// This consumes the Iron instance, but does the listening on /// another task, so is not blocking. /// /// The thread returns a guard that will automatically join with the parent /// once it is dropped, blocking until this happens. /// /// Defaults to a threadpool of size `2 * num_cpus`. /// /// ## Panics<|fim▁hole|> /// Panics if the provided address does not parse. To avoid this /// call `to_socket_addrs` yourself and pass a parsed `SocketAddr`. pub fn https<A: ToSocketAddrs>(self, addr: A, certificate: PathBuf, key: PathBuf) -> HttpResult<Listening> { self.listen_with(addr, 2 * ::num_cpus::get(), Protocol::Https { certificate: certificate, key: key }) } /// Kick off the server process with X threads. /// /// ## Panics /// /// Panics if the provided address does not parse. To avoid this /// call `to_socket_addrs` yourself and pass a parsed `SocketAddr`. 
pub fn listen_with<A: ToSocketAddrs>(mut self, addr: A, threads: usize, protocol: Protocol) -> HttpResult<Listening> { let sock_addr = addr.to_socket_addrs() .ok().and_then(|mut addrs| addrs.next()).expect("Could not parse socket address."); self.addr = Some(sock_addr); self.protocol = Some(protocol.clone()); let server = match protocol { Protocol::Http => Server::http(self), Protocol::Https { ref certificate, ref key } => Server::https(self, certificate, key) }; Ok(try!(server.listen_threads(sock_addr, threads))) } /// Instantiate a new instance of `Iron`. /// /// This will create a new `Iron`, the base unit of the server, using the /// passed in `Handler`. pub fn new(handler: H) -> Iron<H> { Iron { handler: handler, addr: None, protocol: None } } fn bad_request(&self, mut http_res: HttpResponse<Fresh>) { *http_res.status_mut() = status::BadRequest; let http_res = match http_res.start() { Ok(res) => res, // Would like this to work, but if not *shrug* Err(_) => return, }; // We would like this to work, but can't do anything if it doesn't. let _ = http_res.end(); } } impl<H: Handler> ::hyper::server::Handler for Iron<H> { fn handle(&self, http_req: HttpRequest, http_res: HttpResponse<Fresh>) { // Create `Request` wrapper. let mut req = match Request::from_http(http_req, self.addr.clone().unwrap(), self.protocol.as_ref().unwrap()) { Ok(req) => req, Err(e) => { error!("Error creating request:\n {}", e); return self.bad_request(http_res); } }; // Dispatch the request let res = self.handler.handle(&mut req); match res { // Write the response back to http_res Ok(res) => res.write_back(http_res), Err(e) => { error!("Error handling:\n{:?}\nError was: {:?}", req, e.error); e.response.write_back(http_res); } } } }<|fim▁end|>
///
<|file_name|>hv4d.cpp<|end_file_name|><|fim▁begin|>/***************************************************************************** * Copyright (C) 2004-2013 The PaGMO development team, * * Advanced Concepts Team (ACT), European Space Agency (ESA) * * http://apps.sourceforge.net/mediawiki/pagmo * * http://apps.sourceforge.net/mediawiki/pagmo/index.php?title=Developers * * http://apps.sourceforge.net/mediawiki/pagmo/index.php?title=Credits * * [email protected] * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of *<|fim▁hole|> * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the * * Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * *****************************************************************************/ #include "hv4d.h" namespace pagmo { namespace util { namespace hv_algorithm { /// Compute hypervolume /** * @param[in] points vector of points containing the D-dimensional points for which we compute the hypervolume * @param[in] r_point reference point for the points * * @return hypervolume. */ double hv4d::compute(std::vector<fitness_vector> &points, const fitness_vector &r_point) const { // Filter out points that are duplicated, dominated, or sharing the same value of an objective as the reference point // This does not alter the final result of the computation. // Wrapped algorithm is susceptible to such cases, thus it is a precaution measure. std::vector<fitness_vector>::iterator it = points.begin(); while (it != points.end()) { bool erase = false; // Quick check for sharing an objective with the reference point for (unsigned int d_idx = 0 ; d_idx < 4 ; ++d_idx) { if (fabs(r_point[d_idx] - (*it)[d_idx]) < 1e-10) { erase = true; break; } } // If the point was not eliminated during first check, try the domination check if (!erase) { std::vector<fitness_vector>::iterator it2 = points.begin(); while (it2 != points.end()){ if (it == it2) { ++it2; continue; } bool dominates = true; for (unsigned int d_idx = 0 ; d_idx < 4 ; ++d_idx) { if ((*it)[d_idx] < (*it2)[d_idx]) { dominates = false; break; } } if (dominates) { erase = true; break; } ++it2; } } // Erase the point if necessary if (erase) { it = points.erase(it); } else { ++it; } } // Prepare the initial data to suit the original code double* data = new double[points.size() * 4]; double refpoint[4]; for (unsigned int d_idx = 0 ; d_idx < 4 ; ++d_idx) { refpoint[d_idx] = r_point[d_idx]; } unsigned int data_idx = 0; for (unsigned int p_idx = 0 ; p_idx < points.size() ; ++p_idx) { for (unsigned int d_idx = 0 ; d_idx < 4 ; ++d_idx) { data[data_idx++] = points[p_idx][d_idx]; } } double hv = guerreiro_hv4d(data, points.size(), refpoint); delete[] data; return hv; } /// Exclusive method /** * As of yet, this algorithm does not support this method, even in its naive form, due to a poor handling of the dominated points. 
*/ double hv4d::exclusive(const unsigned int p_idx, std::vector<fitness_vector> &points, const fitness_vector &r_point) const { (void)p_idx; (void)points; (void)r_point; pagmo_throw(value_error, "This method is not supported by the hv4d algorithm"); } /// Least contributor method /** * As of yet, this algorithm does not support this method, even in its naive form, due to a poor handling of the dominated points. */ unsigned int hv4d::least_contributor(std::vector<fitness_vector> &points, const fitness_vector &r_point) const { (void)points; (void)r_point; pagmo_throw(value_error, "This method is not supported by the hv4d algorithm"); } /// Greatest contributor method /** * As of yet, this algorithm does not support this method, even in its naive form, due to a poor handling of the dominated points. */ unsigned int hv4d::greatest_contributor(std::vector<fitness_vector> &points, const fitness_vector &r_point) const { (void)points; (void)r_point; pagmo_throw(value_error, "This method is not supported by the hv4d algorithm"); } /// Contributions method /** * As of yet, this algorithm does not support this method, even in its naive form, due to a poor handling of the dominated points. */ std::vector<double> hv4d::contributions(std::vector<fitness_vector> &points, const fitness_vector &r_point) const { (void)points; (void)r_point; pagmo_throw(value_error, "This method is not supported by the hv4d algorithm"); } /// Verify before compute /** * Verifies whether given algorithm suits the requested data. * * @param[in] points vector of points containing the 4-dimensional points for which we compute the hypervolume * @param[in] r_point reference point for the vector of points * * @throws value_error when trying to compute the hypervolume for the non-maximal reference point */ void hv4d::verify_before_compute(const std::vector<fitness_vector> &points, const fitness_vector &r_point) const { if (r_point.size() != 4) { pagmo_throw(value_error, "Algorithm HV4D works only for 4-dimensional cases"); } base::assert_minimisation(points, r_point); } /// Clone method. base_ptr hv4d::clone() const { return base_ptr(new hv4d(*this)); } /// Algorithm name std::string hv4d::get_name() const { return "Four-dimensional hypervolume by Andreia P. Guerreiro"; } } } } BOOST_CLASS_EXPORT_IMPLEMENT(pagmo::util::hv_algorithm::hv4d);<|fim▁end|>
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
<|file_name|>animated_properties.mako.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ <%namespace name="helpers" file="/helpers.mako.rs" /> <% from data import to_idl_name, SYSTEM_FONT_LONGHANDS %> use cssparser::Parser; #[cfg(feature = "gecko")] use gecko_bindings::bindings::RawServoAnimationValueMap; #[cfg(feature = "gecko")] use gecko_bindings::structs::RawGeckoGfxMatrix4x4; #[cfg(feature = "gecko")] use gecko_bindings::structs::nsCSSPropertyID; #[cfg(feature = "gecko")] use gecko_bindings::sugar::ownership::{HasFFI, HasSimpleFFI}; use itertools::{EitherOrBoth, Itertools}; use properties::{CSSWideKeyword, PropertyDeclaration}; use properties::longhands; use properties::longhands::font_weight::computed_value::T as FontWeight; use properties::longhands::font_stretch::computed_value::T as FontStretch; #[cfg(feature = "gecko")] use properties::longhands::font_variation_settings::computed_value::T as FontVariationSettings; use properties::longhands::visibility::computed_value::T as Visibility; #[cfg(feature = "gecko")] use properties::PropertyId; use properties::{LonghandId, ShorthandId}; use selectors::parser::SelectorParseErrorKind; use servo_arc::Arc; use smallvec::SmallVec; use std::cmp; use std::fmt; #[cfg(feature = "gecko")] use hash::FnvHashMap; use style_traits::{ParseError, ToCss}; use super::ComputedValues; use values::{CSSFloat, CustomIdent, Either}; use values::animated::{Animate, Procedure, ToAnimatedValue, ToAnimatedZero}; use values::animated::color::RGBA as AnimatedRGBA; use values::animated::effects::Filter as AnimatedFilter; use values::animated::effects::FilterList as AnimatedFilterList; use values::computed::{Angle, CalcLengthOrPercentage}; use values::computed::{ClipRect, Context, ComputedUrl}; use values::computed::{Length, LengthOrPercentage, LengthOrPercentageOrAuto}; use values::computed::{LengthOrPercentageOrNone, MaxLength}; use values::computed::{NonNegativeNumber, Number, NumberOrPercentage, Percentage}; use values::computed::length::NonNegativeLengthOrPercentage; use values::computed::ToComputedValue; use values::computed::transform::{DirectionVector, Matrix, Matrix3D}; use values::computed::transform::TransformOperation as ComputedTransformOperation; use values::computed::transform::Transform as ComputedTransform; use values::generics::transform::{Transform, TransformOperation}; use values::distance::{ComputeSquaredDistance, SquaredDistance}; #[cfg(feature = "gecko")] use values::generics::FontSettings as GenericFontSettings; #[cfg(feature = "gecko")] use values::generics::FontSettingTag as GenericFontSettingTag; #[cfg(feature = "gecko")] use values::generics::FontSettingTagFloat; use values::generics::NonNegative; use values::generics::effects::Filter; use values::generics::position as generic_position; use values::generics::svg::{SVGLength, SvgLengthOrPercentageOrNumber, SVGPaint}; use values::generics::svg::{SVGPaintKind, SVGStrokeDashArray, SVGOpacity}; /// <https://drafts.csswg.org/css-transitions/#animtype-repeatable-list> pub trait RepeatableListAnimatable: Animate {} /// Returns true if this nsCSSPropertyID is one of the animatable properties. 
#[cfg(feature = "gecko")] pub fn nscsspropertyid_is_animatable(property: nsCSSPropertyID) -> bool { match property { % for prop in data.longhands + data.shorthands_except_all(): % if prop.animatable: ${helpers.to_nscsspropertyid(prop.ident)} => true, % endif % endfor _ => false } } /// A given transition property, that is either `All`, a transitionable longhand property, /// a shorthand with at least one transitionable longhand component, or an unsupported property. // NB: This needs to be here because it needs all the longhands generated // beforehand. #[derive(Clone, Debug, Eq, Hash, MallocSizeOf, PartialEq)] pub enum TransitionProperty { /// All, any transitionable property changing should generate a transition. /// /// FIXME(emilio): Can we remove this and just use /// Shorthand(ShorthandId::All)? All, /// A shorthand. Shorthand(ShorthandId), /// A longhand transitionable property. Longhand(LonghandId), /// Unrecognized property which could be any non-transitionable, custom property, or /// unknown property. Unsupported(CustomIdent), } impl ToCss for TransitionProperty { fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write { match *self { TransitionProperty::All => dest.write_str("all"), TransitionProperty::Shorthand(ref id) => dest.write_str(id.name()), TransitionProperty::Longhand(ref id) => dest.write_str(id.name()), TransitionProperty::Unsupported(ref id) => id.to_css(dest), } } } trivial_to_computed_value!(TransitionProperty); impl TransitionProperty { /// Iterates over each longhand property. pub fn each<F: FnMut(&LonghandId) -> ()>(mut cb: F) { % for prop in data.longhands: % if prop.transitionable: cb(&LonghandId::${prop.camel_case}); % endif % endfor } /// Iterates over every longhand property that is not /// TransitionProperty::All, stopping and returning true when the provided /// callback returns true for the first time. pub fn any<F: FnMut(&LonghandId) -> bool>(mut cb: F) -> bool { % for prop in data.longhands: % if prop.transitionable: if cb(&LonghandId::${prop.camel_case}) { return true; } % endif % endfor false } /// Parse a transition-property value. pub fn parse<'i, 't>(input: &mut Parser<'i, 't>) -> Result<Self, ParseError<'i>> { let location = input.current_source_location(); let ident = input.expect_ident()?; match_ignore_ascii_case! { &ident, "all" => Ok(TransitionProperty::All), % for prop in data.shorthands_except_all(): "${prop.name}" => Ok(TransitionProperty::Shorthand(ShorthandId::${prop.camel_case})), % endfor % for prop in data.longhands: "${prop.name}" => Ok(TransitionProperty::Longhand(LonghandId::${prop.camel_case})), % endfor "none" => Err(location.new_custom_error(SelectorParseErrorKind::UnexpectedIdent(ident.clone()))), _ => CustomIdent::from_ident(location, ident, &[]).map(TransitionProperty::Unsupported), } } /// Convert TransitionProperty to nsCSSPropertyID. #[cfg(feature = "gecko")] pub fn to_nscsspropertyid(&self) -> Result<nsCSSPropertyID, ()> { Ok(match *self { TransitionProperty::All => nsCSSPropertyID::eCSSPropertyExtra_all_properties, TransitionProperty::Shorthand(ref id) => id.to_nscsspropertyid(), TransitionProperty::Longhand(ref id) => id.to_nscsspropertyid(), TransitionProperty::Unsupported(..) 
=> return Err(()), }) } } /// Convert nsCSSPropertyID to TransitionProperty #[cfg(feature = "gecko")] #[allow(non_upper_case_globals)] impl From<nsCSSPropertyID> for TransitionProperty { fn from(property: nsCSSPropertyID) -> TransitionProperty { match property { % for prop in data.longhands: ${helpers.to_nscsspropertyid(prop.ident)} => { TransitionProperty::Longhand(LonghandId::${prop.camel_case}) } % endfor % for prop in data.shorthands_except_all(): ${helpers.to_nscsspropertyid(prop.ident)} => { TransitionProperty::Shorthand(ShorthandId::${prop.camel_case}) } % endfor nsCSSPropertyID::eCSSPropertyExtra_all_properties => TransitionProperty::All, _ => { panic!("non-convertible nsCSSPropertyID::{:?}", property) } } } } /// Returns true if this nsCSSPropertyID is one of the transitionable properties. #[cfg(feature = "gecko")] pub fn nscsspropertyid_is_transitionable(property: nsCSSPropertyID) -> bool { match property { % for prop in data.longhands + data.shorthands_except_all(): % if prop.transitionable: ${helpers.to_nscsspropertyid(prop.ident)} => true, % endif % endfor _ => false } } /// An animated property interpolation between two computed values for that /// property. #[cfg(feature = "servo")] #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "servo", derive(MallocSizeOf))] pub enum AnimatedProperty { % for prop in data.longhands: % if prop.animatable: <% value_type = "longhands::{}::computed_value::T".format(prop.ident) if not prop.is_animatable_with_computed_value: value_type = "<{} as ToAnimatedValue>::AnimatedValue".format(value_type) %> /// ${prop.name} ${prop.camel_case}(${value_type}, ${value_type}), % endif % endfor } #[cfg(feature = "servo")] impl AnimatedProperty { /// Get the name of this property. pub fn name(&self) -> &'static str { match *self { % for prop in data.longhands: % if prop.animatable: AnimatedProperty::${prop.camel_case}(..) => "${prop.name}", % endif % endfor } } /// Whether this interpolation does animate, that is, whether the start and /// end values are different. pub fn does_animate(&self) -> bool { match *self { % for prop in data.longhands: % if prop.animatable: AnimatedProperty::${prop.camel_case}(ref from, ref to) => from != to, % endif % endfor } } /// Whether an animated property has the same end value as another. pub fn has_the_same_end_value_as(&self, other: &Self) -> bool { match (self, other) { % for prop in data.longhands: % if prop.animatable: (&AnimatedProperty::${prop.camel_case}(_, ref this_end_value), &AnimatedProperty::${prop.camel_case}(_, ref other_end_value)) => { this_end_value == other_end_value } % endif % endfor _ => false, } } /// Update `style` with the proper computed style corresponding to this /// animation at `progress`. 
pub fn update(&self, style: &mut ComputedValues, progress: f64) { match *self { % for prop in data.longhands: % if prop.animatable: AnimatedProperty::${prop.camel_case}(ref from, ref to) => { // https://w3c.github.io/web-animations/#discrete-animation-type % if prop.animation_value_type == "discrete": let value = if progress < 0.5 { from.clone() } else { to.clone() }; % else: let value = match from.animate(to, Procedure::Interpolate { progress }) { Ok(value) => value, Err(()) => return, }; % endif % if not prop.is_animatable_with_computed_value: let value: longhands::${prop.ident}::computed_value::T = ToAnimatedValue::from_animated_value(value); % endif style.mutate_${prop.style_struct.name_lower}().set_${prop.ident}(value); } % endif % endfor } } /// Get an animatable value from a transition-property, an old style, and a /// new style. pub fn from_longhand( property: &LonghandId, old_style: &ComputedValues, new_style: &ComputedValues, ) -> Option<AnimatedProperty> { Some(match *property { % for prop in data.longhands: % if prop.animatable: LonghandId::${prop.camel_case} => { let old_computed = old_style.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}(); let new_computed = new_style.get_${prop.style_struct.ident.strip("_")}().clone_${prop.ident}(); AnimatedProperty::${prop.camel_case}( % if prop.is_animatable_with_computed_value: old_computed, new_computed, % else: old_computed.to_animated_value(), new_computed.to_animated_value(), % endif ) } % endif % endfor _ => return None, }) } } /// A collection of AnimationValue that were composed on an element. /// This HashMap stores the values that are the last AnimationValue to be /// composed for each TransitionProperty. #[cfg(feature = "gecko")] pub type AnimationValueMap = FnvHashMap<LonghandId, AnimationValue>; #[cfg(feature = "gecko")] unsafe impl HasFFI for AnimationValueMap { type FFIType = RawServoAnimationValueMap; } #[cfg(feature = "gecko")] unsafe impl HasSimpleFFI for AnimationValueMap {} /// An enum to represent a single computed value belonging to an animated /// property in order to be interpolated with another one. When interpolating, /// both values need to belong to the same property. /// /// This is different to AnimatedProperty in the sense that AnimatedProperty /// also knows the final value to be used during the animation. /// /// This is to be used in Gecko integration code. /// /// FIXME: We need to add a path for custom properties, but that's trivial after /// this (is a similar path to that of PropertyDeclaration). #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "servo", derive(MallocSizeOf))] pub enum AnimationValue { % for prop in data.longhands: % if prop.animatable: /// ${prop.name} % if prop.is_animatable_with_computed_value: ${prop.camel_case}(longhands::${prop.ident}::computed_value::T), % else: ${prop.camel_case}(<longhands::${prop.ident}::computed_value::T as ToAnimatedValue>::AnimatedValue), % endif % endif % endfor } impl AnimationValue { /// Returns the longhand id this animated value corresponds to. pub fn id(&self) -> LonghandId { match *self { % for prop in data.longhands: % if prop.animatable: AnimationValue::${prop.camel_case}(..) => LonghandId::${prop.camel_case}, % endif % endfor } } /// "Uncompute" this animation value in order to be used inside the CSS /// cascade. 
pub fn uncompute(&self) -> PropertyDeclaration { use properties::longhands; match *self { % for prop in data.longhands: % if prop.animatable: AnimationValue::${prop.camel_case}(ref from) => { PropertyDeclaration::${prop.camel_case}( % if prop.boxed: Box::new( % endif longhands::${prop.ident}::SpecifiedValue::from_computed_value( % if prop.is_animatable_with_computed_value: from % else: &ToAnimatedValue::from_animated_value(from.clone()) % endif )) % if prop.boxed: ) % endif } % endif % endfor } } /// Construct an AnimationValue from a property declaration. pub fn from_declaration( decl: &PropertyDeclaration, context: &mut Context, extra_custom_properties: Option<<&Arc<::custom_properties::CustomPropertiesMap>>, initial: &ComputedValues ) -> Option<Self> { use properties::LonghandId; let animatable = match *decl { % for prop in data.longhands: % if prop.animatable: PropertyDeclaration::${prop.camel_case}(ref val) => { context.for_non_inherited_property = % if prop.style_struct.inherited: None; % else: Some(LonghandId::${prop.camel_case}); % endif % if prop.ident in SYSTEM_FONT_LONGHANDS and product == "gecko": if let Some(sf) = val.get_system() { longhands::system_font::resolve_system_font(sf, context); } % endif % if prop.boxed: let computed = (**val).to_computed_value(context); % else: let computed = val.to_computed_value(context); % endif AnimationValue::${prop.camel_case}( % if prop.is_animatable_with_computed_value: computed % else: computed.to_animated_value() % endif ) }, % endif % endfor PropertyDeclaration::CSSWideKeyword(id, keyword) => { match id { // We put all the animatable properties first in the hopes // that it might increase match locality. % for prop in data.longhands: % if prop.animatable: LonghandId::${prop.camel_case} => { let style_struct = match keyword { % if not prop.style_struct.inherited: CSSWideKeyword::Unset | % endif CSSWideKeyword::Initial => { initial.get_${prop.style_struct.name_lower}() }, % if prop.style_struct.inherited: CSSWideKeyword::Unset | % endif CSSWideKeyword::Inherit => { context.builder .get_parent_${prop.style_struct.name_lower}() }, }; let computed = style_struct.clone_${prop.ident}(); % if not prop.is_animatable_with_computed_value: let computed = computed.to_animated_value(); % endif AnimationValue::${prop.camel_case}(computed) }, % endif % endfor % for prop in data.longhands: % if not prop.animatable: LonghandId::${prop.camel_case} => return None, % endif % endfor } }, PropertyDeclaration::WithVariables(id, ref unparsed) => { let substituted = { let custom_properties = extra_custom_properties.or_else(|| context.style().custom_properties()); unparsed.substitute_variables( id, custom_properties, context.quirks_mode ) }; return AnimationValue::from_declaration( &substituted, context, extra_custom_properties, initial, ) }, _ => return None // non animatable properties will get included because of shorthands. ignore. }; Some(animatable) } /// Get an AnimationValue for an AnimatableLonghand from a given computed values. 
pub fn from_computed_values( property: &LonghandId, computed_values: &ComputedValues ) -> Option<Self> { Some(match *property { % for prop in data.longhands: % if prop.animatable: LonghandId::${prop.camel_case} => { let computed = computed_values .get_${prop.style_struct.ident.strip("_")}() .clone_${prop.ident}(); AnimationValue::${prop.camel_case}( % if prop.is_animatable_with_computed_value: computed % else: computed.to_animated_value() % endif ) } % endif % endfor _ => return None, }) } } fn animate_discrete<T: Clone>(this: &T, other: &T, procedure: Procedure) -> Result<T, ()> { if let Procedure::Interpolate { progress } = procedure { Ok(if progress < 0.5 { this.clone() } else { other.clone() }) } else { Err(()) } } impl Animate for AnimationValue { fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { let value = match (self, other) { % for prop in data.longhands: % if prop.animatable: % if prop.animation_value_type != "discrete": ( &AnimationValue::${prop.camel_case}(ref this), &AnimationValue::${prop.camel_case}(ref other), ) => { AnimationValue::${prop.camel_case}( this.animate(other, procedure)?, ) }, % else: ( &AnimationValue::${prop.camel_case}(ref this), &AnimationValue::${prop.camel_case}(ref other), ) => { AnimationValue::${prop.camel_case}( animate_discrete(this, other, procedure)? ) }, % endif % endif % endfor _ => { panic!("Unexpected AnimationValue::animate call, got: {:?}, {:?}", self, other); } }; Ok(value) } } impl ComputeSquaredDistance for AnimationValue { fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { match *self { % for i, prop in enumerate([p for p in data.longhands if p.animatable and p.animation_value_type == "discrete"]): % if i > 0: | % endif AnimationValue::${prop.camel_case}(..) % endfor => return Err(()), _ => (), } match (self, other) { % for prop in data.longhands: % if prop.animatable: % if prop.animation_value_type != "discrete": (&AnimationValue::${prop.camel_case}(ref this), &AnimationValue::${prop.camel_case}(ref other)) => { this.compute_squared_distance(other) }, % endif % endif % endfor _ => { panic!( "computed values should be of the same property, got: {:?}, {:?}", self, other ); }, } } } impl ToAnimatedZero for AnimationValue { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { match *self { % for prop in data.longhands: % if prop.animatable and prop.animation_value_type != "discrete": AnimationValue::${prop.camel_case}(ref base) => { Ok(AnimationValue::${prop.camel_case}(base.to_animated_zero()?)) }, % endif % endfor _ => Err(()), } } } impl RepeatableListAnimatable for LengthOrPercentage {} impl RepeatableListAnimatable for Either<f32, LengthOrPercentage> {} impl RepeatableListAnimatable for Either<NonNegativeNumber, NonNegativeLengthOrPercentage> {} impl RepeatableListAnimatable for SvgLengthOrPercentageOrNumber<NonNegativeLengthOrPercentage, NonNegativeNumber> {} macro_rules! repeated_vec_impl { ($($ty:ty),*) => { $(impl<T> Animate for $ty where T: RepeatableListAnimatable, { fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { // If the length of either list is zero, the least common multiple is undefined. 
if self.is_empty() || other.is_empty() { return Err(()); } use num_integer::lcm; let len = lcm(self.len(), other.len()); self.iter().cycle().zip(other.iter().cycle()).take(len).map(|(this, other)| { this.animate(other, procedure) }).collect() } } impl<T> ComputeSquaredDistance for $ty where T: ComputeSquaredDistance + RepeatableListAnimatable, { #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { if self.is_empty() || other.is_empty() { return Err(()); } use num_integer::lcm; let len = lcm(self.len(), other.len()); self.iter().cycle().zip(other.iter().cycle()).take(len).map(|(this, other)| { this.compute_squared_distance(other) }).sum() } })* }; } repeated_vec_impl!(SmallVec<[T; 1]>, Vec<T>); /// <https://drafts.csswg.org/css-transitions/#animtype-visibility> impl Animate for Visibility { #[inline] fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { let (this_weight, other_weight) = procedure.weights(); match (*self, *other) { (Visibility::visible, _) => { Ok(if this_weight > 0.0 { *self } else { *other }) }, (_, Visibility::visible) => { Ok(if other_weight > 0.0 { *other } else { *self }) }, _ => Err(()), } } } impl ComputeSquaredDistance for Visibility { #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { Ok(SquaredDistance::Value(if *self == *other { 0. } else { 1. })) } } impl ToAnimatedZero for Visibility { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } /// <https://drafts.csswg.org/css-transitions/#animtype-lpcalc> impl Animate for CalcLengthOrPercentage { #[inline] fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { let animate_percentage_half = |this: Option<Percentage>, other: Option<Percentage>| { if this.is_none() && other.is_none() { return Ok(None); } let this = this.unwrap_or_default(); let other = other.unwrap_or_default(); Ok(Some(this.animate(&other, procedure)?)) }; let length = self.unclamped_length().animate(&other.unclamped_length(), procedure)?; let percentage = animate_percentage_half(self.percentage, other.percentage)?; Ok(CalcLengthOrPercentage::with_clamping_mode(length, percentage, self.clamping_mode)) } } impl ToAnimatedZero for LengthOrPercentageOrAuto { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { match *self { LengthOrPercentageOrAuto::Length(_) | LengthOrPercentageOrAuto::Percentage(_) | LengthOrPercentageOrAuto::Calc(_) => { Ok(LengthOrPercentageOrAuto::Length(Length::new(0.))) }, LengthOrPercentageOrAuto::Auto => Err(()), } } } impl ToAnimatedZero for LengthOrPercentageOrNone { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { match *self { LengthOrPercentageOrNone::Length(_) | LengthOrPercentageOrNone::Percentage(_) | LengthOrPercentageOrNone::Calc(_) => { Ok(LengthOrPercentageOrNone::Length(Length::new(0.))) }, LengthOrPercentageOrNone::None => Err(()), } } } impl ToAnimatedZero for MaxLength { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } /// <http://dev.w3.org/csswg/css-transitions/#animtype-font-weight> impl Animate for FontWeight { #[inline] fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { let a = self.0 as f64; let b = other.0 as f64; const NORMAL: f64 = 400.; let (this_weight, other_weight) = procedure.weights(); let weight = (a - NORMAL) * this_weight + (b - NORMAL) * other_weight + NORMAL; let weight = (weight.max(100.).min(900.) 
/ 100.).round() * 100.; Ok(FontWeight(weight as u16)) } } impl ToAnimatedZero for FontWeight { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Ok(FontWeight::normal()) } } /// <https://drafts.csswg.org/css-fonts/#font-stretch-prop> impl Animate for FontStretch { #[inline] fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { let from = f64::from(*self); let to = f64::from(*other); let normal = f64::from(FontStretch::normal); let (this_weight, other_weight) = procedure.weights(); let result = (from - normal) * this_weight + (to - normal) * other_weight + normal; Ok(result.into()) } } impl ComputeSquaredDistance for FontStretch { #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { f64::from(*self).compute_squared_distance(&(*other).into()) } } impl ToAnimatedZero for FontStretch { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } /// We should treat font stretch as real number in order to interpolate this property. /// <https://drafts.csswg.org/css-fonts-3/#font-stretch-animation> impl From<FontStretch> for f64 { fn from(stretch: FontStretch) -> f64 { use self::FontStretch::*; match stretch { ultra_condensed => 1.0, extra_condensed => 2.0, condensed => 3.0, semi_condensed => 4.0, normal => 5.0, semi_expanded => 6.0, expanded => 7.0, extra_expanded => 8.0, ultra_expanded => 9.0, } } } impl Into<FontStretch> for f64 { fn into(self) -> FontStretch { use properties::longhands::font_stretch::computed_value::T::*; let index = (self + 0.5).floor().min(9.0).max(1.0); static FONT_STRETCH_ENUM_MAP: [FontStretch; 9] = [ ultra_condensed, extra_condensed, condensed, semi_condensed, normal, semi_expanded, expanded, extra_expanded, ultra_expanded ]; FONT_STRETCH_ENUM_MAP[(index - 1.0) as usize] } } /// <https://drafts.csswg.org/css-fonts-4/#font-variation-settings-def> #[cfg(feature = "gecko")] impl Animate for FontVariationSettings { #[inline] fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { FontSettingTagIter::new(self, other)? .map(|r| r.and_then(|(st, ot)| st.animate(&ot, procedure))) .collect::<Result<Vec<FontSettingTag>, ()>>() .map(GenericFontSettings::Tag) } } #[cfg(feature = "gecko")] impl ComputeSquaredDistance for FontVariationSettings { #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { FontSettingTagIter::new(self, other)? 
.map(|r| r.and_then(|(st, ot)| st.compute_squared_distance(&ot))) .sum() } } #[cfg(feature = "gecko")] impl ToAnimatedZero for FontVariationSettings { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } #[cfg(feature = "gecko")] impl Animate for FontSettingTag { #[inline] fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { if self.tag != other.tag { return Err(()); } let value = self.value.animate(&other.value, procedure)?; Ok(FontSettingTag { tag: self.tag, value, }) } } #[cfg(feature = "gecko")] impl ComputeSquaredDistance for FontSettingTag { #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { if self.tag != other.tag { return Err(()); } self.value.compute_squared_distance(&other.value) } } #[cfg(feature = "gecko")] type FontSettingTag = GenericFontSettingTag<FontSettingTagFloat>; #[cfg(feature = "gecko")] struct FontSettingTagIterState<'a> { tags: Vec<(&'a FontSettingTag)>, index: usize, prev_tag: u32, } #[cfg(feature = "gecko")] impl<'a> FontSettingTagIterState<'a> { fn new(tags: Vec<(&'a FontSettingTag)>) -> FontSettingTagIterState<'a> { FontSettingTagIterState { index: tags.len(), tags, prev_tag: 0, } } } /// Iterator for font-variation-settings tag lists /// /// [CSS fonts level 4](https://drafts.csswg.org/css-fonts-4/#descdef-font-face-font-variation-settings) /// defines the animation of font-variation-settings as follows: /// /// Two declarations of font-feature-settings[sic] can be animated between if they are "like". /// "Like" declarations are ones where the same set of properties appear (in any order). /// Because succesive[sic] duplicate properties are applied instead of prior duplicate /// properties, two declarations can be "like" even if they have differing number of /// properties. If two declarations are "like" then animation occurs pairwise between /// corresponding values in the declarations. /// /// In other words if we have the following lists: /// /// "wght" 1.4, "wdth" 5, "wght" 2 /// "wdth" 8, "wght" 4, "wdth" 10 /// /// We should animate between: /// /// "wdth" 5, "wght" 2 /// "wght" 4, "wdth" 10 /// /// This iterator supports this by sorting the two lists, then iterating them in reverse, /// and skipping entries with repeated tag names. It will return Some(Err()) if it reaches the /// end of one list before the other, or if the tag names do not match. 
/// /// For the above example, this iterator would return: /// /// Some(Ok("wght" 2, "wght" 4)) /// Some(Ok("wdth" 5, "wdth" 10)) /// None /// #[cfg(feature = "gecko")] struct FontSettingTagIter<'a> { a_state: FontSettingTagIterState<'a>, b_state: FontSettingTagIterState<'a>, } #[cfg(feature = "gecko")] impl<'a> FontSettingTagIter<'a> { fn new( a_settings: &'a FontVariationSettings, b_settings: &'a FontVariationSettings, ) -> Result<FontSettingTagIter<'a>, ()> { if let (&GenericFontSettings::Tag(ref a_tags), &GenericFontSettings::Tag(ref b_tags)) = (a_settings, b_settings) { fn as_new_sorted_tags(tags: &Vec<FontSettingTag>) -> Vec<(&FontSettingTag)> { use std::iter::FromIterator; let mut sorted_tags: Vec<(&FontSettingTag)> = Vec::from_iter(tags.iter()); sorted_tags.sort_by_key(|k| k.tag); sorted_tags }; Ok(FontSettingTagIter { a_state: FontSettingTagIterState::new(as_new_sorted_tags(a_tags)), b_state: FontSettingTagIterState::new(as_new_sorted_tags(b_tags)), }) } else { Err(()) } } fn next_tag(state: &mut FontSettingTagIterState<'a>) -> Option<(&'a FontSettingTag)> { if state.index == 0 { return None; } state.index -= 1; let tag = state.tags[state.index]; if tag.tag == state.prev_tag { FontSettingTagIter::next_tag(state) } else { state.prev_tag = tag.tag; Some(tag) } } } #[cfg(feature = "gecko")] impl<'a> Iterator for FontSettingTagIter<'a> { type Item = Result<(&'a FontSettingTag, &'a FontSettingTag), ()>; fn next(&mut self) -> Option<Result<(&'a FontSettingTag, &'a FontSettingTag), ()>> { match ( FontSettingTagIter::next_tag(&mut self.a_state), FontSettingTagIter::next_tag(&mut self.b_state), ) { (Some(at), Some(bt)) if at.tag == bt.tag => Some(Ok((at, bt))), (None, None) => None, _ => Some(Err(())), // Mismatch number of unique tags or tag names. } } } impl<H, V> RepeatableListAnimatable for generic_position::Position<H, V> where H: RepeatableListAnimatable, V: RepeatableListAnimatable {} /// <https://drafts.csswg.org/css-transitions/#animtype-rect> impl Animate for ClipRect { #[inline] fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { use values::computed::Length; let animate_component = |this: &Option<Length>, other: &Option<Length>| { match (this.animate(other, procedure)?, procedure) { (None, Procedure::Interpolate { .. }) => Ok(None), (None, _) => Err(()), (result, _) => Ok(result), } }; Ok(ClipRect { top: animate_component(&self.top, &other.top)?, right: animate_component(&self.right, &other.right)?, bottom: animate_component(&self.bottom, &other.bottom)?, left: animate_component(&self.left, &other.left)?, }) } } impl ToAnimatedZero for ClipRect { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Err(()) } } fn animate_multiplicative_factor( this: CSSFloat, other: CSSFloat, procedure: Procedure, ) -> Result<CSSFloat, ()> { Ok((this - 1.).animate(&(other - 1.), procedure)? + 1.) 
} /// <http://dev.w3.org/csswg/css-transforms/#interpolation-of-transforms> impl Animate for ComputedTransformOperation { fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { match (self, other) { ( &TransformOperation::Matrix3D(ref this), &TransformOperation::Matrix3D(ref other), ) => { Ok(TransformOperation::Matrix3D( this.animate(other, procedure)?, )) }, ( &TransformOperation::Matrix(ref this), &TransformOperation::Matrix(ref other), ) => { Ok(TransformOperation::Matrix( this.animate(other, procedure)?, )) }, ( &TransformOperation::Skew(ref fx, ref fy), &TransformOperation::Skew(ref tx, ref ty), ) => { Ok(TransformOperation::Skew( fx.animate(tx, procedure)?, fy.animate(ty, procedure)?, )) }, ( &TransformOperation::SkewX(ref f), &TransformOperation::SkewX(ref t), ) => { Ok(TransformOperation::SkewX( f.animate(t, procedure)?, )) }, ( &TransformOperation::SkewY(ref f), &TransformOperation::SkewY(ref t), ) => { Ok(TransformOperation::SkewY( f.animate(t, procedure)?, )) }, ( &TransformOperation::Translate3D(ref fx, ref fy, ref fz), &TransformOperation::Translate3D(ref tx, ref ty, ref tz), ) => { Ok(TransformOperation::Translate3D( fx.animate(tx, procedure)?, fy.animate(ty, procedure)?, fz.animate(tz, procedure)?, )) }, ( &TransformOperation::Translate(ref fx, ref fy), &TransformOperation::Translate(ref tx, ref ty), ) => { Ok(TransformOperation::Translate( fx.animate(tx, procedure)?, fy.animate(ty, procedure)? )) }, ( &TransformOperation::TranslateX(ref f), &TransformOperation::TranslateX(ref t), ) => { Ok(TransformOperation::TranslateX( f.animate(t, procedure)? )) }, ( &TransformOperation::TranslateY(ref f), &TransformOperation::TranslateY(ref t), ) => { Ok(TransformOperation::TranslateY( f.animate(t, procedure)? )) }, ( &TransformOperation::TranslateZ(ref f), &TransformOperation::TranslateZ(ref t), ) => { Ok(TransformOperation::TranslateZ( f.animate(t, procedure)? )) }, ( &TransformOperation::Scale3D(ref fx, ref fy, ref fz), &TransformOperation::Scale3D(ref tx, ref ty, ref tz), ) => { Ok(TransformOperation::Scale3D( animate_multiplicative_factor(*fx, *tx, procedure)?, animate_multiplicative_factor(*fy, *ty, procedure)?, animate_multiplicative_factor(*fz, *tz, procedure)?, )) }, ( &TransformOperation::ScaleX(ref f), &TransformOperation::ScaleX(ref t), ) => { Ok(TransformOperation::ScaleX( animate_multiplicative_factor(*f, *t, procedure)? )) }, ( &TransformOperation::ScaleY(ref f), &TransformOperation::ScaleY(ref t), ) => { Ok(TransformOperation::ScaleY( animate_multiplicative_factor(*f, *t, procedure)? )) }, ( &TransformOperation::ScaleZ(ref f), &TransformOperation::ScaleZ(ref t), ) => { Ok(TransformOperation::ScaleZ( animate_multiplicative_factor(*f, *t, procedure)? )) }, ( &TransformOperation::Rotate3D(fx, fy, fz, fa), &TransformOperation::Rotate3D(tx, ty, tz, ta), ) => { let (fx, fy, fz, fa) = ComputedTransform::get_normalized_vector_and_angle(fx, fy, fz, fa); let (tx, ty, tz, ta) = ComputedTransform::get_normalized_vector_and_angle(tx, ty, tz, ta); if (fx, fy, fz) == (tx, ty, tz) { let ia = fa.animate(&ta, procedure)?; Ok(TransformOperation::Rotate3D(fx, fy, fz, ia)) } else { let matrix_f = rotate_to_matrix(fx, fy, fz, fa); let matrix_t = rotate_to_matrix(tx, ty, tz, ta); Ok(TransformOperation::Matrix3D( matrix_f.animate(&matrix_t, procedure)?, )) } }, ( &TransformOperation::RotateX(fa), &TransformOperation::RotateX(ta), ) => { Ok(TransformOperation::RotateX( fa.animate(&ta, procedure)? 
)) }, ( &TransformOperation::RotateY(fa), &TransformOperation::RotateY(ta), ) => { Ok(TransformOperation::RotateY( fa.animate(&ta, procedure)? )) }, ( &TransformOperation::RotateZ(fa), &TransformOperation::RotateZ(ta), ) => { Ok(TransformOperation::RotateZ( fa.animate(&ta, procedure)? )) }, ( &TransformOperation::Rotate(fa), &TransformOperation::Rotate(ta), ) => { Ok(TransformOperation::Rotate( fa.animate(&ta, procedure)? )) }, ( &TransformOperation::Rotate(fa), &TransformOperation::RotateZ(ta), ) => { Ok(TransformOperation::Rotate( fa.animate(&ta, procedure)? )) }, ( &TransformOperation::RotateZ(fa), &TransformOperation::Rotate(ta), ) => { Ok(TransformOperation::Rotate( fa.animate(&ta, procedure)? )) }, ( &TransformOperation::Perspective(ref fd), &TransformOperation::Perspective(ref td), ) => { let mut fd_matrix = Matrix3D::identity(); let mut td_matrix = Matrix3D::identity(); if fd.px() > 0. { fd_matrix.m34 = -1. / fd.px(); } if td.px() > 0. { td_matrix.m34 = -1. / td.px(); } Ok(TransformOperation::Matrix3D( fd_matrix.animate(&td_matrix, procedure)?, )) }, _ if self.is_translate() && other.is_translate() => { self.to_translate_3d().animate(&other.to_translate_3d(), procedure) } _ if self.is_scale() && other.is_scale() => { self.to_scale_3d().animate(&other.to_scale_3d(), procedure) } _ => Err(()), } } } fn is_matched_operation(first: &ComputedTransformOperation, second: &ComputedTransformOperation) -> bool { match (first, second) { (&TransformOperation::Matrix(..), &TransformOperation::Matrix(..)) | (&TransformOperation::Matrix3D(..), &TransformOperation::Matrix3D(..)) | (&TransformOperation::Skew(..), &TransformOperation::Skew(..)) | (&TransformOperation::SkewX(..), &TransformOperation::SkewX(..)) | (&TransformOperation::SkewY(..), &TransformOperation::SkewY(..)) | (&TransformOperation::Rotate(..), &TransformOperation::Rotate(..)) | (&TransformOperation::Rotate3D(..), &TransformOperation::Rotate3D(..)) | (&TransformOperation::RotateX(..), &TransformOperation::RotateX(..)) | (&TransformOperation::RotateY(..), &TransformOperation::RotateY(..)) | (&TransformOperation::RotateZ(..), &TransformOperation::RotateZ(..)) | (&TransformOperation::Perspective(..), &TransformOperation::Perspective(..)) => true, // we animate scale and translate operations against each other (a, b) if a.is_translate() && b.is_translate() => true, (a, b) if a.is_scale() && b.is_scale() => true, // InterpolateMatrix and AccumulateMatrix are for mismatched transform. _ => false } } /// <https://www.w3.org/TR/css-transforms-1/#Rotate3dDefined> fn rotate_to_matrix(x: f32, y: f32, z: f32, a: Angle) -> Matrix3D { let half_rad = a.radians() / 2.0; let sc = (half_rad).sin() * (half_rad).cos(); let sq = (half_rad).sin().powi(2); Matrix3D { m11: 1.0 - 2.0 * (y * y + z * z) * sq, m12: 2.0 * (x * y * sq + z * sc), m13: 2.0 * (x * z * sq - y * sc), m14: 0.0, m21: 2.0 * (x * y * sq - z * sc), m22: 1.0 - 2.0 * (x * x + z * z) * sq, m23: 2.0 * (y * z * sq + x * sc), m24: 0.0, m31: 2.0 * (x * z * sq + y * sc), m32: 2.0 * (y * z * sq - x * sc), m33: 1.0 - 2.0 * (x * x + y * y) * sq, m34: 0.0, m41: 0.0, m42: 0.0, m43: 0.0, m44: 1.0 } } /// A 2d matrix for interpolation. #[derive(Clone, ComputeSquaredDistance, Copy, Debug)] #[cfg_attr(feature = "servo", derive(MallocSizeOf))] #[allow(missing_docs)] // FIXME: We use custom derive for ComputeSquaredDistance. However, If possible, we should convert // the InnerMatrix2D into types with physical meaning. 
This custom derive computes the squared // distance from each matrix item, and this makes the result different from that in Gecko if we // have skew factor in the Matrix3D. pub struct InnerMatrix2D { pub m11: CSSFloat, pub m12: CSSFloat, pub m21: CSSFloat, pub m22: CSSFloat, } /// A 2d translation function. #[cfg_attr(feature = "servo", derive(MallocSizeOf))] #[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug)] pub struct Translate2D(f32, f32); /// A 2d scale function. #[derive(Clone, ComputeSquaredDistance, Copy, Debug)] #[cfg_attr(feature = "servo", derive(MallocSizeOf))] pub struct Scale2D(f32, f32); /// A decomposed 2d matrix. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(MallocSizeOf))] pub struct MatrixDecomposed2D { /// The translation function. pub translate: Translate2D, /// The scale function. pub scale: Scale2D, /// The rotation angle. pub angle: f32, /// The inner matrix. pub matrix: InnerMatrix2D, } impl Animate for InnerMatrix2D { fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { Ok(InnerMatrix2D { m11: animate_multiplicative_factor(self.m11, other.m11, procedure)?, m12: self.m12.animate(&other.m12, procedure)?, m21: self.m21.animate(&other.m21, procedure)?, m22: animate_multiplicative_factor(self.m22, other.m22, procedure)?, }) } } impl Animate for Scale2D { fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { Ok(Scale2D( animate_multiplicative_factor(self.0, other.0, procedure)?, animate_multiplicative_factor(self.1, other.1, procedure)?, )) } } impl Animate for MatrixDecomposed2D { /// <https://drafts.csswg.org/css-transforms/#interpolation-of-decomposed-2d-matrix-values> fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { // If x-axis of one is flipped, and y-axis of the other, // convert to an unflipped rotation. let mut scale = self.scale; let mut angle = self.angle; let mut other_angle = other.angle; if (scale.0 < 0.0 && other.scale.1 < 0.0) || (scale.1 < 0.0 && other.scale.0 < 0.0) { scale.0 = -scale.0; scale.1 = -scale.1; angle += if angle < 0.0 {180.} else {-180.}; } // Don't rotate the long way around. if angle == 0.0 { angle = 360. } if other_angle == 0.0 { other_angle = 360. } if (angle - other_angle).abs() > 180. { if angle > other_angle { angle -= 360. } else{ other_angle -= 360. } } // Interpolate all values. let translate = self.translate.animate(&other.translate, procedure)?; let scale = scale.animate(&other.scale, procedure)?; let angle = angle.animate(&other_angle, procedure)?; let matrix = self.matrix.animate(&other.matrix, procedure)?; Ok(MatrixDecomposed2D { translate: translate, scale: scale, angle: angle, matrix: matrix, }) } } impl ComputeSquaredDistance for MatrixDecomposed2D { #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { // Use Radian to compute the distance. const RAD_PER_DEG: f64 = ::std::f64::consts::PI / 180.0; let angle1 = self.angle as f64 * RAD_PER_DEG; let angle2 = other.angle as f64 * RAD_PER_DEG; Ok(self.translate.compute_squared_distance(&other.translate)? + self.scale.compute_squared_distance(&other.scale)? + angle1.compute_squared_distance(&angle2)? + self.matrix.compute_squared_distance(&other.matrix)?) 
} } impl Animate for Matrix3D { #[cfg(feature = "servo")] fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { if self.is_3d() || other.is_3d() { let decomposed_from = decompose_3d_matrix(*self); let decomposed_to = decompose_3d_matrix(*other); match (decomposed_from, decomposed_to) { (Ok(this), Ok(other)) => { Ok(Matrix3D::from(this.animate(&other, procedure)?)) }, // Matrices can be undecomposable due to couple reasons, e.g., // non-invertible matrices. In this case, we should report Err // here, and let the caller do the fallback procedure. _ => Err(()) } } else { let this = MatrixDecomposed2D::from(*self); let other = MatrixDecomposed2D::from(*other); Ok(Matrix3D::from(this.animate(&other, procedure)?)) } } #[cfg(feature = "gecko")] // Gecko doesn't exactly follow the spec here; we use a different procedure // to match it fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { let (from, to) = if self.is_3d() || other.is_3d() { (decompose_3d_matrix(*self), decompose_3d_matrix(*other)) } else { (decompose_2d_matrix(self), decompose_2d_matrix(other)) }; match (from, to) { (Ok(from), Ok(to)) => { Ok(Matrix3D::from(from.animate(&to, procedure)?)) }, // Matrices can be undecomposable due to couple reasons, e.g., // non-invertible matrices. In this case, we should report Err here, // and let the caller do the fallback procedure. _ => Err(()) } } } impl Animate for Matrix { #[cfg(feature = "servo")] fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { let this = Matrix3D::from(*self); let other = Matrix3D::from(*other); let this = MatrixDecomposed2D::from(this); let other = MatrixDecomposed2D::from(other); Ok(Matrix3D::from(this.animate(&other, procedure)?).into_2d()?) } #[cfg(feature = "gecko")] // Gecko doesn't exactly follow the spec here; we use a different procedure // to match it fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { let from = decompose_2d_matrix(&(*self).into()); let to = decompose_2d_matrix(&(*other).into()); match (from, to) { (Ok(from), Ok(to)) => { Matrix3D::from(from.animate(&to, procedure)?).into_2d() }, // Matrices can be undecomposable due to couple reasons, e.g., // non-invertible matrices. In this case, we should report Err here, // and let the caller do the fallback procedure. _ => Err(()) } } } impl ComputeSquaredDistance for Matrix3D { #[inline] #[cfg(feature = "servo")] fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { if self.is_3d() || other.is_3d() { let from = decompose_3d_matrix(*self)?; let to = decompose_3d_matrix(*other)?; from.compute_squared_distance(&to) } else { let from = MatrixDecomposed2D::from(*self); let to = MatrixDecomposed2D::from(*other); from.compute_squared_distance(&to) } } #[inline] #[cfg(feature = "gecko")] fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { let (from, to) = if self.is_3d() || other.is_3d() { (decompose_3d_matrix(*self)?, decompose_3d_matrix(*other)?) } else { (decompose_2d_matrix(self)?, decompose_2d_matrix(other)?) }; from.compute_squared_distance(&to) } } impl From<Matrix3D> for MatrixDecomposed2D { /// Decompose a 2D matrix. 
/// <https://drafts.csswg.org/css-transforms/#decomposing-a-2d-matrix> fn from(matrix: Matrix3D) -> MatrixDecomposed2D { let mut row0x = matrix.m11; let mut row0y = matrix.m12; let mut row1x = matrix.m21; let mut row1y = matrix.m22; let translate = Translate2D(matrix.m41, matrix.m42); let mut scale = Scale2D((row0x * row0x + row0y * row0y).sqrt(), (row1x * row1x + row1y * row1y).sqrt()); // If determinant is negative, one axis was flipped. let determinant = row0x * row1y - row0y * row1x; if determinant < 0. { if row0x < row1y { scale.0 = -scale.0; } else { scale.1 = -scale.1; } } // Renormalize matrix to remove scale. if scale.0 != 0.0 { row0x *= 1. / scale.0; row0y *= 1. / scale.0; } if scale.1 != 0.0 { row1x *= 1. / scale.1; row1y *= 1. / scale.1; } // Compute rotation and renormalize matrix. let mut angle = row0y.atan2(row0x); if angle != 0.0 { let sn = -row0y; let cs = row0x; let m11 = row0x; let m12 = row0y; let m21 = row1x; let m22 = row1y; row0x = cs * m11 + sn * m21; row0y = cs * m12 + sn * m22; row1x = -sn * m11 + cs * m21; row1y = -sn * m12 + cs * m22; } let m = InnerMatrix2D { m11: row0x, m12: row0y, m21: row1x, m22: row1y, }; // Convert into degrees because our rotation functions expect it. angle = angle.to_degrees(); MatrixDecomposed2D { translate: translate, scale: scale, angle: angle, matrix: m, } } } impl From<MatrixDecomposed2D> for Matrix3D { /// Recompose a 2D matrix. /// <https://drafts.csswg.org/css-transforms/#recomposing-to-a-2d-matrix> fn from(decomposed: MatrixDecomposed2D) -> Matrix3D { let mut computed_matrix = Matrix3D::identity(); computed_matrix.m11 = decomposed.matrix.m11; computed_matrix.m12 = decomposed.matrix.m12; computed_matrix.m21 = decomposed.matrix.m21; computed_matrix.m22 = decomposed.matrix.m22; // Translate matrix. computed_matrix.m41 = decomposed.translate.0; computed_matrix.m42 = decomposed.translate.1; // Rotate matrix. let angle = decomposed.angle.to_radians(); let cos_angle = angle.cos(); let sin_angle = angle.sin(); let mut rotate_matrix = Matrix3D::identity(); rotate_matrix.m11 = cos_angle; rotate_matrix.m12 = sin_angle; rotate_matrix.m21 = -sin_angle; rotate_matrix.m22 = cos_angle; // Multiplication of computed_matrix and rotate_matrix computed_matrix = multiply(rotate_matrix, computed_matrix); // Scale matrix. computed_matrix.m11 *= decomposed.scale.0; computed_matrix.m12 *= decomposed.scale.0; computed_matrix.m21 *= decomposed.scale.1; computed_matrix.m22 *= decomposed.scale.1; computed_matrix } } #[cfg(feature = "gecko")] impl<'a> From< &'a RawGeckoGfxMatrix4x4> for Matrix3D { fn from(m: &'a RawGeckoGfxMatrix4x4) -> Matrix3D { Matrix3D { m11: m[0], m12: m[1], m13: m[2], m14: m[3], m21: m[4], m22: m[5], m23: m[6], m24: m[7], m31: m[8], m32: m[9], m33: m[10], m34: m[11], m41: m[12], m42: m[13], m43: m[14], m44: m[15], } } } #[cfg(feature = "gecko")] impl From<Matrix3D> for RawGeckoGfxMatrix4x4 { fn from(matrix: Matrix3D) -> RawGeckoGfxMatrix4x4 { [ matrix.m11, matrix.m12, matrix.m13, matrix.m14, matrix.m21, matrix.m22, matrix.m23, matrix.m24, matrix.m31, matrix.m32, matrix.m33, matrix.m34, matrix.m41, matrix.m42, matrix.m43, matrix.m44 ] } } /// A 3d translation. #[cfg_attr(feature = "servo", derive(MallocSizeOf))] #[derive(Animate, Clone, ComputeSquaredDistance, Copy, Debug)] pub struct Translate3D(f32, f32, f32); /// A 3d scale function. #[derive(Clone, ComputeSquaredDistance, Copy, Debug)] #[cfg_attr(feature = "servo", derive(MallocSizeOf))] pub struct Scale3D(f32, f32, f32); /// A 3d skew function. 
#[cfg_attr(feature = "servo", derive(MallocSizeOf))] #[derive(Animate, Clone, Copy, Debug)] pub struct Skew(f32, f32, f32); /// A 3d perspective transformation. #[derive(Clone, ComputeSquaredDistance, Copy, Debug)] #[cfg_attr(feature = "servo", derive(MallocSizeOf))] pub struct Perspective(f32, f32, f32, f32); /// A quaternion used to represent a rotation. #[derive(Clone, Copy, Debug)] #[cfg_attr(feature = "servo", derive(MallocSizeOf))] pub struct Quaternion(f64, f64, f64, f64); /// A decomposed 3d matrix. #[derive(Clone, ComputeSquaredDistance, Copy, Debug)] #[cfg_attr(feature = "servo", derive(MallocSizeOf))] pub struct MatrixDecomposed3D { /// A translation function. pub translate: Translate3D, /// A scale function. pub scale: Scale3D, /// The skew component of the transformation. pub skew: Skew, /// The perspective component of the transformation. pub perspective: Perspective, /// The quaternion used to represent the rotation. pub quaternion: Quaternion, } impl Quaternion { /// Return a quaternion from a unit direction vector and angle (unit: radian). #[inline] fn from_direction_and_angle(vector: &DirectionVector, angle: f64) -> Self { debug_assert!((vector.length() - 1.).abs() < 0.0001, "Only accept an unit direction vector to create a quaternion"); // Reference: // https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation // // if the direction axis is (x, y, z) = xi + yj + zk, // and the angle is |theta|, this formula can be done using // an extension of Euler's formula: // q = cos(theta/2) + (xi + yj + zk)(sin(theta/2)) // = cos(theta/2) + // x*sin(theta/2)i + y*sin(theta/2)j + z*sin(theta/2)k Quaternion(vector.x as f64 * (angle / 2.).sin(), vector.y as f64 * (angle / 2.).sin(), vector.z as f64 * (angle / 2.).sin(), (angle / 2.).cos()) } /// Calculate the dot product. #[inline] fn dot(&self, other: &Self) -> f64 { self.0 * other.0 + self.1 * other.1 + self.2 * other.2 + self.3 * other.3 } } impl ComputeSquaredDistance for Quaternion { #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { // Use quaternion vectors to get the angle difference. Both q1 and q2 are unit vectors, // so we can get their angle difference by: // cos(theta/2) = (q1 dot q2) / (|q1| * |q2|) = q1 dot q2. let distance = self.dot(other).max(-1.0).min(1.0).acos() * 2.0; Ok(SquaredDistance::Value(distance * distance)) } } /// Decompose a 3D matrix. /// <https://drafts.csswg.org/css-transforms/#decomposing-a-3d-matrix> fn decompose_3d_matrix(mut matrix: Matrix3D) -> Result<MatrixDecomposed3D, ()> { // Normalize the matrix. if matrix.m44 == 0.0 { return Err(()); } let scaling_factor = matrix.m44; % for i in range(1, 5): % for j in range(1, 5): matrix.m${i}${j} /= scaling_factor; % endfor % endfor // perspective_matrix is used to solve for perspective, but it also provides // an easy way to test for singularity of the upper 3x3 component. let mut perspective_matrix = matrix; % for i in range(1, 4): perspective_matrix.m${i}4 = 0.0; % endfor perspective_matrix.m44 = 1.0; if perspective_matrix.determinant() == 0.0 { return Err(()); } // First, isolate perspective. 
let perspective = if matrix.m14 != 0.0 || matrix.m24 != 0.0 || matrix.m34 != 0.0 { let right_hand_side: [f32; 4] = [ matrix.m14, matrix.m24, matrix.m34, matrix.m44 ]; perspective_matrix = perspective_matrix.inverse().unwrap(); // Transpose perspective_matrix perspective_matrix = Matrix3D { % for i in range(1, 5): % for j in range(1, 5): m${i}${j}: perspective_matrix.m${j}${i}, % endfor % endfor }; // Multiply right_hand_side with perspective_matrix let mut tmp: [f32; 4] = [0.0; 4]; % for i in range(1, 5): tmp[${i - 1}] = (right_hand_side[0] * perspective_matrix.m1${i}) + (right_hand_side[1] * perspective_matrix.m2${i}) + (right_hand_side[2] * perspective_matrix.m3${i}) + (right_hand_side[3] * perspective_matrix.m4${i}); % endfor Perspective(tmp[0], tmp[1], tmp[2], tmp[3]) } else { Perspective(0.0, 0.0, 0.0, 1.0) }; // Next take care of translation let translate = Translate3D ( matrix.m41, matrix.m42, matrix.m43 ); // Now get scale and shear. 'row' is a 3 element array of 3 component vectors let mut row: [[f32; 3]; 3] = [[0.0; 3]; 3]; % for i in range(1, 4): row[${i - 1}][0] = matrix.m${i}1; row[${i - 1}][1] = matrix.m${i}2; row[${i - 1}][2] = matrix.m${i}3; % endfor // Compute X scale factor and normalize first row. let row0len = (row[0][0] * row[0][0] + row[0][1] * row[0][1] + row[0][2] * row[0][2]).sqrt(); let mut scale = Scale3D(row0len, 0.0, 0.0); row[0] = [row[0][0] / row0len, row[0][1] / row0len, row[0][2] / row0len]; // Compute XY shear factor and make 2nd row orthogonal to 1st. let mut skew = Skew(dot(row[0], row[1]), 0.0, 0.0); row[1] = combine(row[1], row[0], 1.0, -skew.0); // Now, compute Y scale and normalize 2nd row. let row1len = (row[1][0] * row[1][0] + row[1][1] * row[1][1] + row[1][2] * row[1][2]).sqrt(); scale.1 = row1len; row[1] = [row[1][0] / row1len, row[1][1] / row1len, row[1][2] / row1len]; skew.0 /= scale.1; // Compute XZ and YZ shears, orthogonalize 3rd row skew.1 = dot(row[0], row[2]); row[2] = combine(row[2], row[0], 1.0, -skew.1); skew.2 = dot(row[1], row[2]); row[2] = combine(row[2], row[1], 1.0, -skew.2); // Next, get Z scale and normalize 3rd row. let row2len = (row[2][0] * row[2][0] + row[2][1] * row[2][1] + row[2][2] * row[2][2]).sqrt(); scale.2 = row2len; row[2] = [row[2][0] / row2len, row[2][1] / row2len, row[2][2] / row2len]; skew.1 /= scale.2; skew.2 /= scale.2; // At this point, the matrix (in rows) is orthonormal. // Check for a coordinate system flip. If the determinant // is -1, then negate the matrix and the scaling factors. let pdum3 = cross(row[1], row[2]); if dot(row[0], pdum3) < 0.0 { % for i in range(3): scale.${i} *= -1.0; row[${i}][0] *= -1.0; row[${i}][1] *= -1.0; row[${i}][2] *= -1.0; % endfor } // Now, get the rotations out let mut quaternion = Quaternion ( 0.5 * ((1.0 + row[0][0] - row[1][1] - row[2][2]).max(0.0) as f64).sqrt(), 0.5 * ((1.0 - row[0][0] + row[1][1] - row[2][2]).max(0.0) as f64).sqrt(), 0.5 * ((1.0 - row[0][0] - row[1][1] + row[2][2]).max(0.0) as f64).sqrt(), 0.5 * ((1.0 + row[0][0] + row[1][1] + row[2][2]).max(0.0) as f64).sqrt() ); if row[2][1] > row[1][2] { quaternion.0 = -quaternion.0 } if row[0][2] > row[2][0] { quaternion.1 = -quaternion.1 } if row[1][0] > row[0][1] { quaternion.2 = -quaternion.2 } Ok(MatrixDecomposed3D { translate: translate, scale: scale, skew: skew, perspective: perspective, quaternion: quaternion }) } /// Decompose a 2D matrix for Gecko. // Use the algorithm from nsStyleTransformMatrix::Decompose2DMatrix() in Gecko. 
#[cfg(feature = "gecko")] fn decompose_2d_matrix(matrix: &Matrix3D) -> Result<MatrixDecomposed3D, ()> { // The index is column-major, so the equivalent transform matrix is: // | m11 m21 0 m41 | => | m11 m21 | and translate(m41, m42) // | m12 m22 0 m42 | | m12 m22 | // | 0 0 1 0 | // | 0 0 0 1 | let (mut m11, mut m12) = (matrix.m11, matrix.m12); let (mut m21, mut m22) = (matrix.m21, matrix.m22); // Check if this is a singular matrix. if m11 * m22 == m12 * m21 { return Err(()); } let mut scale_x = (m11 * m11 + m12 * m12).sqrt(); m11 /= scale_x; m12 /= scale_x; let mut shear_xy = m11 * m21 + m12 * m22; m21 -= m11 * shear_xy; m22 -= m12 * shear_xy; let scale_y = (m21 * m21 + m22 * m22).sqrt(); m21 /= scale_y; m22 /= scale_y; shear_xy /= scale_y; let determinant = m11 * m22 - m12 * m21; // Determinant should now be 1 or -1. if 0.99 > determinant.abs() || determinant.abs() > 1.01 { return Err(()); } if determinant < 0. { m11 = -m11; m12 = -m12; shear_xy = -shear_xy; scale_x = -scale_x; } Ok(MatrixDecomposed3D { translate: Translate3D(matrix.m41, matrix.m42, 0.), scale: Scale3D(scale_x, scale_y, 1.), skew: Skew(shear_xy, 0., 0.), perspective: Perspective(0., 0., 0., 1.), quaternion: Quaternion::from_direction_and_angle(&DirectionVector::new(0., 0., 1.), m12.atan2(m11) as f64) }) } // Combine 2 point. fn combine(a: [f32; 3], b: [f32; 3], ascl: f32, bscl: f32) -> [f32; 3] { [ (ascl * a[0]) + (bscl * b[0]), (ascl * a[1]) + (bscl * b[1]), (ascl * a[2]) + (bscl * b[2]) ] } // Dot product. fn dot(a: [f32; 3], b: [f32; 3]) -> f32 { a[0] * b[0] + a[1] * b[1] + a[2] * b[2] } // Cross product. fn cross(row1: [f32; 3], row2: [f32; 3]) -> [f32; 3] { [ row1[1] * row2[2] - row1[2] * row2[1], row1[2] * row2[0] - row1[0] * row2[2], row1[0] * row2[1] - row1[1] * row2[0] ] } impl Animate for Scale3D { fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { Ok(Scale3D( animate_multiplicative_factor(self.0, other.0, procedure)?, animate_multiplicative_factor(self.1, other.1, procedure)?, animate_multiplicative_factor(self.2, other.2, procedure)?, )) } } impl ComputeSquaredDistance for Skew { // We have to use atan() to convert the skew factors into skew angles, so implement // ComputeSquaredDistance manually. #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { Ok(self.0.atan().compute_squared_distance(&other.0.atan())? + self.1.atan().compute_squared_distance(&other.1.atan())? + self.2.atan().compute_squared_distance(&other.2.atan())?) } } impl Animate for Perspective { fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { Ok(Perspective( self.0.animate(&other.0, procedure)?, self.1.animate(&other.1, procedure)?, self.2.animate(&other.2, procedure)?, animate_multiplicative_factor(self.3, other.3, procedure)?, )) } } impl Animate for MatrixDecomposed3D { /// <https://drafts.csswg.org/css-transforms/#interpolation-of-decomposed-3d-matrix-values> fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { use std::f64; let (this_weight, other_weight) = procedure.weights(); debug_assert!((this_weight + other_weight - 1.0f64).abs() <= f64::EPSILON || other_weight == 1.0f64 || other_weight == 0.0f64, "animate should only be used for interpolating or accumulating transforms"); let mut sum = *self; // Add translate, scale, skew and perspective components. 
sum.translate = self.translate.animate(&other.translate, procedure)?; sum.scale = self.scale.animate(&other.scale, procedure)?; sum.skew = self.skew.animate(&other.skew, procedure)?; sum.perspective = self.perspective.animate(&other.perspective, procedure)?; // Add quaternions using spherical linear interpolation (Slerp). // // We take a specialized code path for accumulation (where other_weight is 1) if other_weight == 1.0 { if this_weight == 0.0 { return Ok(*other) } let clamped_w = self.quaternion.3.min(1.0).max(-1.0); // Determine the scale factor. let mut theta = clamped_w.acos(); let mut scale = if theta == 0.0 { 0.0 } else { 1.0 / theta.sin() }; theta *= this_weight; scale *= theta.sin(); // Scale the self matrix by this_weight. let mut scaled_self = *self; % for i in range(3): scaled_self.quaternion.${i} *= scale; % endfor scaled_self.quaternion.3 = theta.cos(); // Multiply scaled-self by other. let a = &scaled_self.quaternion; let b = &other.quaternion; sum.quaternion = Quaternion( a.3 * b.0 + a.0 * b.3 + a.1 * b.2 - a.2 * b.1, a.3 * b.1 - a.0 * b.2 + a.1 * b.3 + a.2 * b.0, a.3 * b.2 + a.0 * b.1 - a.1 * b.0 + a.2 * b.3, a.3 * b.3 - a.0 * b.0 - a.1 * b.1 - a.2 * b.2, ); } else { let mut product = self.quaternion.0 * other.quaternion.0 + self.quaternion.1 * other.quaternion.1 + self.quaternion.2 * other.quaternion.2 + self.quaternion.3 * other.quaternion.3; // Clamp product to -1.0 <= product <= 1.0 product = product.min(1.0); product = product.max(-1.0); if product == 1.0 { return Ok(sum); } let theta = product.acos(); let w = (other_weight * theta).sin() * 1.0 / (1.0 - product * product).sqrt(); let mut a = *self; let mut b = *other; % for i in range(4): a.quaternion.${i} *= (other_weight * theta).cos() - product * w; b.quaternion.${i} *= w; sum.quaternion.${i} = a.quaternion.${i} + b.quaternion.${i}; % endfor } Ok(sum) } } impl From<MatrixDecomposed3D> for Matrix3D { /// Recompose a 3D matrix. 
/// <https://drafts.csswg.org/css-transforms/#recomposing-to-a-3d-matrix> fn from(decomposed: MatrixDecomposed3D) -> Matrix3D { let mut matrix = Matrix3D::identity(); // Apply perspective % for i in range(1, 5): matrix.m${i}4 = decomposed.perspective.${i - 1}; % endfor // Apply translation % for i in range(1, 4): % for j in range(1, 4): matrix.m4${i} += decomposed.translate.${j - 1} * matrix.m${j}${i}; % endfor % endfor // Apply rotation let x = decomposed.quaternion.0; let y = decomposed.quaternion.1; let z = decomposed.quaternion.2; let w = decomposed.quaternion.3; // Construct a composite rotation matrix from the quaternion values // rotationMatrix is a identity 4x4 matrix initially let mut rotation_matrix = Matrix3D::identity(); rotation_matrix.m11 = 1.0 - 2.0 * (y * y + z * z) as f32; rotation_matrix.m12 = 2.0 * (x * y + z * w) as f32; rotation_matrix.m13 = 2.0 * (x * z - y * w) as f32; rotation_matrix.m21 = 2.0 * (x * y - z * w) as f32; rotation_matrix.m22 = 1.0 - 2.0 * (x * x + z * z) as f32; rotation_matrix.m23 = 2.0 * (y * z + x * w) as f32; rotation_matrix.m31 = 2.0 * (x * z + y * w) as f32; rotation_matrix.m32 = 2.0 * (y * z - x * w) as f32; rotation_matrix.m33 = 1.0 - 2.0 * (x * x + y * y) as f32; matrix = multiply(rotation_matrix, matrix); // Apply skew let mut temp = Matrix3D::identity(); if decomposed.skew.2 != 0.0 { temp.m32 = decomposed.skew.2; matrix = multiply(temp, matrix); } if decomposed.skew.1 != 0.0 { temp.m32 = 0.0; temp.m31 = decomposed.skew.1; matrix = multiply(temp, matrix); } if decomposed.skew.0 != 0.0 { temp.m31 = 0.0; temp.m21 = decomposed.skew.0; matrix = multiply(temp, matrix); } // Apply scale % for i in range(1, 4): % for j in range(1, 4): matrix.m${i}${j} *= decomposed.scale.${i - 1}; % endfor % endfor matrix } } // Multiplication of two 4x4 matrices. 
fn multiply(a: Matrix3D, b: Matrix3D) -> Matrix3D { let mut a_clone = a; % for i in range(1, 5): % for j in range(1, 5): a_clone.m${i}${j} = (a.m${i}1 * b.m1${j}) + (a.m${i}2 * b.m2${j}) + (a.m${i}3 * b.m3${j}) + (a.m${i}4 * b.m4${j}); % endfor % endfor a_clone } impl Matrix3D { fn is_3d(&self) -> bool { self.m13 != 0.0 || self.m14 != 0.0 || self.m23 != 0.0 || self.m24 != 0.0 || self.m31 != 0.0 || self.m32 != 0.0 || self.m33 != 1.0 || self.m34 != 0.0 || self.m43 != 0.0 || self.m44 != 1.0 } fn determinant(&self) -> CSSFloat { self.m14 * self.m23 * self.m32 * self.m41 - self.m13 * self.m24 * self.m32 * self.m41 - self.m14 * self.m22 * self.m33 * self.m41 + self.m12 * self.m24 * self.m33 * self.m41 + self.m13 * self.m22 * self.m34 * self.m41 - self.m12 * self.m23 * self.m34 * self.m41 - self.m14 * self.m23 * self.m31 * self.m42 + self.m13 * self.m24 * self.m31 * self.m42 + self.m14 * self.m21 * self.m33 * self.m42 - self.m11 * self.m24 * self.m33 * self.m42 - self.m13 * self.m21 * self.m34 * self.m42 + self.m11 * self.m23 * self.m34 * self.m42 + self.m14 * self.m22 * self.m31 * self.m43 - self.m12 * self.m24 * self.m31 * self.m43 - self.m14 * self.m21 * self.m32 * self.m43 + self.m11 * self.m24 * self.m32 * self.m43 + self.m12 * self.m21 * self.m34 * self.m43 - self.m11 * self.m22 * self.m34 * self.m43 - self.m13 * self.m22 * self.m31 * self.m44 + self.m12 * self.m23 * self.m31 * self.m44 + self.m13 * self.m21 * self.m32 * self.m44 - self.m11 * self.m23 * self.m32 * self.m44 - self.m12 * self.m21 * self.m33 * self.m44 + self.m11 * self.m22 * self.m33 * self.m44 } fn inverse(&self) -> Option<Matrix3D> { let mut det = self.determinant(); if det == 0.0 { return None; } det = 1.0 / det; let x = Matrix3D { m11: det * (self.m23*self.m34*self.m42 - self.m24*self.m33*self.m42 + self.m24*self.m32*self.m43 - self.m22*self.m34*self.m43 - self.m23*self.m32*self.m44 + self.m22*self.m33*self.m44), m12: det * (self.m14*self.m33*self.m42 - self.m13*self.m34*self.m42 - self.m14*self.m32*self.m43 + self.m12*self.m34*self.m43 + self.m13*self.m32*self.m44 - self.m12*self.m33*self.m44), m13: det * (self.m13*self.m24*self.m42 - self.m14*self.m23*self.m42 + self.m14*self.m22*self.m43 - self.m12*self.m24*self.m43 - self.m13*self.m22*self.m44 + self.m12*self.m23*self.m44), m14: det * (self.m14*self.m23*self.m32 - self.m13*self.m24*self.m32 - self.m14*self.m22*self.m33 + self.m12*self.m24*self.m33 + self.m13*self.m22*self.m34 - self.m12*self.m23*self.m34), m21: det * (self.m24*self.m33*self.m41 - self.m23*self.m34*self.m41 - self.m24*self.m31*self.m43 + self.m21*self.m34*self.m43 + self.m23*self.m31*self.m44 - self.m21*self.m33*self.m44), m22: det * (self.m13*self.m34*self.m41 - self.m14*self.m33*self.m41 + self.m14*self.m31*self.m43 - self.m11*self.m34*self.m43 - self.m13*self.m31*self.m44 + self.m11*self.m33*self.m44), m23: det * (self.m14*self.m23*self.m41 - self.m13*self.m24*self.m41 - self.m14*self.m21*self.m43 + self.m11*self.m24*self.m43 + self.m13*self.m21*self.m44 - self.m11*self.m23*self.m44), m24: det * (self.m13*self.m24*self.m31 - self.m14*self.m23*self.m31 + self.m14*self.m21*self.m33 - self.m11*self.m24*self.m33 - self.m13*self.m21*self.m34 + self.m11*self.m23*self.m34), m31: det * (self.m22*self.m34*self.m41 - self.m24*self.m32*self.m41 + self.m24*self.m31*self.m42 - self.m21*self.m34*self.m42 - self.m22*self.m31*self.m44 + self.m21*self.m32*self.m44), m32: det * (self.m14*self.m32*self.m41 - self.m12*self.m34*self.m41 - self.m14*self.m31*self.m42 + self.m11*self.m34*self.m42 + 
self.m12*self.m31*self.m44 - self.m11*self.m32*self.m44), m33: det * (self.m12*self.m24*self.m41 - self.m14*self.m22*self.m41 + self.m14*self.m21*self.m42 - self.m11*self.m24*self.m42 - self.m12*self.m21*self.m44 + self.m11*self.m22*self.m44), m34: det * (self.m14*self.m22*self.m31 - self.m12*self.m24*self.m31 - self.m14*self.m21*self.m32 + self.m11*self.m24*self.m32 + self.m12*self.m21*self.m34 - self.m11*self.m22*self.m34), m41: det * (self.m23*self.m32*self.m41 - self.m22*self.m33*self.m41 - self.m23*self.m31*self.m42 + self.m21*self.m33*self.m42 + self.m22*self.m31*self.m43 - self.m21*self.m32*self.m43), m42: det * (self.m12*self.m33*self.m41 - self.m13*self.m32*self.m41 + self.m13*self.m31*self.m42 - self.m11*self.m33*self.m42 - self.m12*self.m31*self.m43 + self.m11*self.m32*self.m43), m43: det * (self.m13*self.m22*self.m41 - self.m12*self.m23*self.m41 - self.m13*self.m21*self.m42 + self.m11*self.m23*self.m42 + self.m12*self.m21*self.m43 - self.m11*self.m22*self.m43), m44: det * (self.m12*self.m23*self.m31 - self.m13*self.m22*self.m31 + self.m13*self.m21*self.m32 - self.m11*self.m23*self.m32 - self.m12*self.m21*self.m33 + self.m11*self.m22*self.m33), }; Some(x) } } /// <https://drafts.csswg.org/css-transforms/#interpolation-of-transforms> impl Animate for ComputedTransform { #[inline] fn animate( &self, other_: &Self, procedure: Procedure, ) -> Result<Self, ()> { let animate_equal_lists = |this: &[ComputedTransformOperation], other: &[ComputedTransformOperation]| -> Result<ComputedTransform, ()> { Ok(Transform(this.iter().zip(other) .map(|(this, other)| this.animate(other, procedure)) .collect::<Result<Vec<_>, _>>()?)) // If we can't animate for a pair of matched transform lists // this means we have at least one undecomposable matrix, // so we should bubble out Err here, and let the caller do // the fallback procedure. }; if self.0.is_empty() && other_.0.is_empty() { return Ok(Transform(vec![])); } let this = &self.0; let other = &other_.0; if procedure == Procedure::Add { let result = this.iter().chain(other).cloned().collect::<Vec<_>>(); return Ok(Transform(result)); } // For matched transform lists. { if this.len() == other.len() { let is_matched_transforms = this.iter().zip(other).all(|(this, other)| { is_matched_operation(this, other) }); if is_matched_transforms { return animate_equal_lists(this, other); } } } // For mismatched transform lists. let mut owned_this = this.clone(); let mut owned_other = other.clone(); if this.is_empty() { let this = other_.to_animated_zero()?.0; if this.iter().zip(other).all(|(this, other)| is_matched_operation(this, other)) { return animate_equal_lists(&this, other) } owned_this = this; } if other.is_empty() { let other = self.to_animated_zero()?.0; if this.iter().zip(&other).all(|(this, other)| is_matched_operation(this, other)) { return animate_equal_lists(this, &other) } owned_other = other; } match procedure { Procedure::Add => Err(()), Procedure::Interpolate { progress } => { Ok(Transform(vec![TransformOperation::InterpolateMatrix { from_list: Transform(owned_this), to_list: Transform(owned_other), progress: Percentage(progress as f32), }])) }, Procedure::Accumulate { count } => { Ok(Transform(vec![TransformOperation::AccumulateMatrix { from_list: Transform(owned_this), to_list: Transform(owned_other), count: cmp::min(count, i32::max_value() as u64) as i32, }])) }, } } } // This might not be the most useful definition of distance. 
It might be better, for example, // to trace the distance travelled by a point as its transform is interpolated between the two // lists. That, however, proves to be quite complicated so we take a simple approach for now. // See https://bugzilla.mozilla.org/show_bug.cgi?id=1318591#c0. impl ComputeSquaredDistance for ComputedTransformOperation { fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { // For translate, We don't want to require doing layout in order to calculate the result, so // drop the percentage part. However, dropping percentage makes us impossible to // compute the distance for the percentage-percentage case, but Gecko uses the // same formula, so it's fine for now. // Note: We use pixel value to compute the distance for translate, so we have to // convert Au into px. let extract_pixel_length = |lop: &LengthOrPercentage| { match *lop { LengthOrPercentage::Length(px) => px.px(), LengthOrPercentage::Percentage(_) => 0., LengthOrPercentage::Calc(calc) => calc.length().px(), } }; match (self, other) { ( &TransformOperation::Matrix3D(ref this), &TransformOperation::Matrix3D(ref other), ) => { this.compute_squared_distance(other) }, ( &TransformOperation::Matrix(ref this), &TransformOperation::Matrix(ref other), ) => { let this: Matrix3D = (*this).into(); let other: Matrix3D = (*other).into(); this.compute_squared_distance(&other) }, ( &TransformOperation::Skew(ref fx, ref fy), &TransformOperation::Skew(ref tx, ref ty), ) => { Ok( fx.compute_squared_distance(&tx)? + fy.compute_squared_distance(&ty)?, ) }, ( &TransformOperation::SkewX(ref f), &TransformOperation::SkewX(ref t), ) | ( &TransformOperation::SkewY(ref f), &TransformOperation::SkewY(ref t), ) => { f.compute_squared_distance(&t) }, ( &TransformOperation::Translate3D(ref fx, ref fy, ref fz), &TransformOperation::Translate3D(ref tx, ref ty, ref tz), ) => { let fx = extract_pixel_length(&fx); let fy = extract_pixel_length(&fy); let tx = extract_pixel_length(&tx); let ty = extract_pixel_length(&ty); Ok( fx.compute_squared_distance(&tx)? + fy.compute_squared_distance(&ty)? + fz.compute_squared_distance(&tz)?, ) }, ( &TransformOperation::Scale3D(ref fx, ref fy, ref fz), &TransformOperation::Scale3D(ref tx, ref ty, ref tz), ) => { Ok( fx.compute_squared_distance(&tx)? + fy.compute_squared_distance(&ty)? + fz.compute_squared_distance(&tz)?, ) }, ( &TransformOperation::Rotate3D(fx, fy, fz, fa), &TransformOperation::Rotate3D(tx, ty, tz, ta), ) => { let (fx, fy, fz, angle1) = ComputedTransform::get_normalized_vector_and_angle(fx, fy, fz, fa); let (tx, ty, tz, angle2) = ComputedTransform::get_normalized_vector_and_angle(tx, ty, tz, ta); if (fx, fy, fz) == (tx, ty, tz) { angle1.compute_squared_distance(&angle2) } else { let v1 = DirectionVector::new(fx, fy, fz); let v2 = DirectionVector::new(tx, ty, tz); let q1 = Quaternion::from_direction_and_angle(&v1, angle1.radians64()); let q2 = Quaternion::from_direction_and_angle(&v2, angle2.radians64()); q1.compute_squared_distance(&q2) } } ( &TransformOperation::RotateX(fa), &TransformOperation::RotateX(ta), ) | ( &TransformOperation::RotateY(fa), &TransformOperation::RotateY(ta), ) | ( &TransformOperation::RotateZ(fa), &TransformOperation::RotateZ(ta), ) | ( &TransformOperation::Rotate(fa), &TransformOperation::Rotate(ta), ) => { fa.compute_squared_distance(&ta) } ( &TransformOperation::Perspective(ref fd), &TransformOperation::Perspective(ref td), ) => { let mut fd_matrix = Matrix3D::identity(); let mut td_matrix = Matrix3D::identity(); if fd.px() > 0. 
{ fd_matrix.m34 = -1. / fd.px(); } if td.px() > 0. { td_matrix.m34 = -1. / td.px(); } fd_matrix.compute_squared_distance(&td_matrix) } ( &TransformOperation::Perspective(ref p), &TransformOperation::Matrix3D(ref m), ) | ( &TransformOperation::Matrix3D(ref m), &TransformOperation::Perspective(ref p), ) => { let mut p_matrix = Matrix3D::identity(); if p.px() > 0. { p_matrix.m34 = -1. / p.px(); } p_matrix.compute_squared_distance(&m) } // Gecko cross-interpolates amongst all translate and all scale // functions (See ToPrimitive in layout/style/StyleAnimationValue.cpp) // without falling back to InterpolateMatrix _ if self.is_translate() && other.is_translate() => { self.to_translate_3d().compute_squared_distance(&other.to_translate_3d()) } _ if self.is_scale() && other.is_scale() => { self.to_scale_3d().compute_squared_distance(&other.to_scale_3d()) } _ => Err(()), } } } impl ComputeSquaredDistance for ComputedTransform { #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { let list1 = &self.0; let list2 = &other.0; let squared_dist: Result<SquaredDistance, _> = list1.iter().zip_longest(list2).map(|it| { match it { EitherOrBoth::Both(this, other) => { this.compute_squared_distance(other) }, EitherOrBoth::Left(list) | EitherOrBoth::Right(list) => { list.to_animated_zero()?.compute_squared_distance(list) }, } }).sum(); // Roll back to matrix interpolation if there is any Err(()) in the transform lists, such // as mismatched transform functions. if let Err(_) = squared_dist { let matrix1: Matrix3D = self.to_transform_3d_matrix(None).ok_or(())?.into(); let matrix2: Matrix3D = other.to_transform_3d_matrix(None).ok_or(())?.into(); return matrix1.compute_squared_distance(&matrix2); } squared_dist } } /// Animated SVGPaint pub type IntermediateSVGPaint = SVGPaint<AnimatedRGBA, ComputedUrl>; /// Animated SVGPaintKind pub type IntermediateSVGPaintKind = SVGPaintKind<AnimatedRGBA, ComputedUrl>; impl ToAnimatedZero for IntermediateSVGPaint { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { Ok(IntermediateSVGPaint { kind: self.kind.to_animated_zero()?, fallback: self.fallback.and_then(|v| v.to_animated_zero().ok()), }) } } impl From<NonNegativeLengthOrPercentage> for NumberOrPercentage { fn from(lop: NonNegativeLengthOrPercentage) -> NumberOrPercentage { lop.0.into() } } impl From<NonNegativeNumber> for NumberOrPercentage { fn from(num: NonNegativeNumber) -> NumberOrPercentage { num.0.into() } } impl From<LengthOrPercentage> for NumberOrPercentage { fn from(lop: LengthOrPercentage) -> NumberOrPercentage { match lop { LengthOrPercentage::Length(len) => NumberOrPercentage::Number(len.px()), LengthOrPercentage::Percentage(p) => NumberOrPercentage::Percentage(p), LengthOrPercentage::Calc(_) => { panic!("We dont't expected calc interpolation for SvgLengthOrPercentageOrNumber"); }, } } } impl From<Number> for NumberOrPercentage { fn from(num: Number) -> NumberOrPercentage { NumberOrPercentage::Number(num) } } fn convert_to_number_or_percentage<LengthOrPercentageType, NumberType>( from: SvgLengthOrPercentageOrNumber<LengthOrPercentageType, NumberType>) -> NumberOrPercentage where LengthOrPercentageType: Into<NumberOrPercentage>, NumberType: Into<NumberOrPercentage> { match from { SvgLengthOrPercentageOrNumber::LengthOrPercentage(lop) => { lop.into() } SvgLengthOrPercentageOrNumber::Number(num) => { num.into() } } } fn convert_from_number_or_percentage<LengthOrPercentageType, NumberType>( from: NumberOrPercentage) -> 
SvgLengthOrPercentageOrNumber<LengthOrPercentageType, NumberType> where LengthOrPercentageType: From<LengthOrPercentage>, NumberType: From<Number> { match from { NumberOrPercentage::Number(num) => SvgLengthOrPercentageOrNumber::Number(num.into()), NumberOrPercentage::Percentage(p) => SvgLengthOrPercentageOrNumber::LengthOrPercentage( (LengthOrPercentage::Percentage(p)).into()) } } impl <L, N> Animate for SvgLengthOrPercentageOrNumber<L, N> where L: Animate + From<LengthOrPercentage> + Into<NumberOrPercentage> + Copy, N: Animate + From<Number> + Into<NumberOrPercentage>, LengthOrPercentage: From<L>, Self: Copy, { #[inline] fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { if self.has_calc() || other.has_calc() { // TODO: We need to treat calc value. // https://bugzilla.mozilla.org/show_bug.cgi?id=1386967 return Err(()); } let this = convert_to_number_or_percentage(*self); let other = convert_to_number_or_percentage(*other); match (this, other) { ( NumberOrPercentage::Number(ref this), NumberOrPercentage::Number(ref other), ) => { Ok(convert_from_number_or_percentage( NumberOrPercentage::Number(this.animate(other, procedure)?) )) }, ( NumberOrPercentage::Percentage(ref this), NumberOrPercentage::Percentage(ref other), ) => { Ok(convert_from_number_or_percentage( NumberOrPercentage::Percentage(this.animate(other, procedure)?) )) }, _ => Err(()), } } } impl<L> Animate for SVGLength<L> where L: Animate + Clone, { #[inline] fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { match (self, other) { (&SVGLength::Length(ref this), &SVGLength::Length(ref other)) => { Ok(SVGLength::Length(this.animate(other, procedure)?))<|fim▁hole|> } } /// <https://www.w3.org/TR/SVG11/painting.html#StrokeDasharrayProperty> impl<L> Animate for SVGStrokeDashArray<L> where L: Clone + RepeatableListAnimatable, { #[inline] fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { if matches!(procedure, Procedure::Add | Procedure::Accumulate { .. }) { // Non-additive. 
return Err(()); } match (self, other) { (&SVGStrokeDashArray::Values(ref this), &SVGStrokeDashArray::Values(ref other)) => { Ok(SVGStrokeDashArray::Values(this.animate(other, procedure)?)) }, _ => Err(()), } } } impl<L> ToAnimatedZero for SVGStrokeDashArray<L> where L: ToAnimatedZero, { #[inline] fn to_animated_zero(&self) -> Result<Self, ()> { match *self { SVGStrokeDashArray::Values(ref values) => { Ok(SVGStrokeDashArray::Values( values.iter().map(ToAnimatedZero::to_animated_zero).collect::<Result<Vec<_>, _>>()?, )) } SVGStrokeDashArray::ContextValue => Ok(SVGStrokeDashArray::ContextValue), } } } impl<O> Animate for SVGOpacity<O> where O: Animate + Clone, { #[inline] fn animate(&self, other: &Self, procedure: Procedure) -> Result<Self, ()> { match (self, other) { (&SVGOpacity::Opacity(ref this), &SVGOpacity::Opacity(ref other)) => { Ok(SVGOpacity::Opacity(this.animate(other, procedure)?)) }, _ => Err(()), } } } <% FILTER_FUNCTIONS = [ 'Blur', 'Brightness', 'Contrast', 'Grayscale', 'HueRotate', 'Invert', 'Opacity', 'Saturate', 'Sepia' ] %> /// <https://drafts.fxtf.org/filters/#animation-of-filters> impl Animate for AnimatedFilter { fn animate( &self, other: &Self, procedure: Procedure, ) -> Result<Self, ()> { match (self, other) { % for func in ['Blur', 'Grayscale', 'HueRotate', 'Invert', 'Sepia']: (&Filter::${func}(ref this), &Filter::${func}(ref other)) => { Ok(Filter::${func}(this.animate(other, procedure)?)) }, % endfor % for func in ['Brightness', 'Contrast', 'Opacity', 'Saturate']: (&Filter::${func}(ref this), &Filter::${func}(ref other)) => { Ok(Filter::${func}(NonNegative(animate_multiplicative_factor( this.0, other.0, procedure, )?))) }, % endfor % if product == "gecko": (&Filter::DropShadow(ref this), &Filter::DropShadow(ref other)) => { Ok(Filter::DropShadow(this.animate(other, procedure)?)) }, % endif _ => Err(()), } } } /// <http://dev.w3.org/csswg/css-transforms/#none-transform-animation> impl ToAnimatedZero for AnimatedFilter { fn to_animated_zero(&self) -> Result<Self, ()> { match *self { % for func in ['Blur', 'Grayscale', 'HueRotate', 'Invert', 'Sepia']: Filter::${func}(ref this) => Ok(Filter::${func}(this.to_animated_zero()?)), % endfor % for func in ['Brightness', 'Contrast', 'Opacity', 'Saturate']: Filter::${func}(_) => Ok(Filter::${func}(NonNegative(1.))), % endfor % if product == "gecko": Filter::DropShadow(ref this) => Ok(Filter::DropShadow(this.to_animated_zero()?)), % endif _ => Err(()), } } } // FIXME(nox): This should be derived. 
impl ComputeSquaredDistance for AnimatedFilter { fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { match (self, other) { % for func in FILTER_FUNCTIONS: (&Filter::${func}(ref this), &Filter::${func}(ref other)) => { this.compute_squared_distance(other) }, % endfor % if product == "gecko": (&Filter::DropShadow(ref this), &Filter::DropShadow(ref other)) => { this.compute_squared_distance(other) }, % endif _ => Err(()), } } } impl Animate for AnimatedFilterList { #[inline] fn animate( &self, other: &Self, procedure: Procedure, ) -> Result<Self, ()> { if procedure == Procedure::Add { return Ok(AnimatedFilterList( self.0.iter().chain(other.0.iter()).cloned().collect(), )); } Ok(AnimatedFilterList(self.0.iter().zip_longest(other.0.iter()).map(|it| { match it { EitherOrBoth::Both(this, other) => { this.animate(other, procedure) }, EitherOrBoth::Left(this) => { this.animate(&this.to_animated_zero()?, procedure) }, EitherOrBoth::Right(other) => { other.to_animated_zero()?.animate(other, procedure) }, } }).collect::<Result<Vec<_>, _>>()?)) } } impl ComputeSquaredDistance for AnimatedFilterList { #[inline] fn compute_squared_distance(&self, other: &Self) -> Result<SquaredDistance, ()> { self.0.iter().zip_longest(other.0.iter()).map(|it| { match it { EitherOrBoth::Both(this, other) => { this.compute_squared_distance(other) }, EitherOrBoth::Left(list) | EitherOrBoth::Right(list) => { list.to_animated_zero()?.compute_squared_distance(list) }, } }).sum() } } /// A comparator to sort PropertyIds such that longhands are sorted before shorthands, /// shorthands with fewer components are sorted before shorthands with more components, /// and otherwise shorthands are sorted by IDL name as defined by [Web Animations][property-order]. /// /// Using this allows us to prioritize values specified by longhands (or smaller /// shorthand subsets) when longhands and shorthands are both specified on the one keyframe. /// /// Example orderings that result from this: /// /// margin-left, margin /// /// and: /// /// border-top-color, border-color, border-top, border /// /// [property-order] https://w3c.github.io/web-animations/#calculating-computed-keyframes #[cfg(feature = "gecko")] pub fn compare_property_priority(a: &PropertyId, b: &PropertyId) -> cmp::Ordering { match (a.as_shorthand(), b.as_shorthand()) { // Within shorthands, sort by the number of subproperties, then by IDL name. (Ok(a), Ok(b)) => { let subprop_count_a = a.longhands().len(); let subprop_count_b = b.longhands().len(); subprop_count_a.cmp(&subprop_count_b).then_with( || get_idl_name_sort_order(&a).cmp(&get_idl_name_sort_order(&b))) }, // Longhands go before shorthands. (Ok(_), Err(_)) => cmp::Ordering::Greater, (Err(_), Ok(_)) => cmp::Ordering::Less, // Both are longhands or custom properties in which case they don't overlap and should // sort equally. _ => cmp::Ordering::Equal, } } #[cfg(feature = "gecko")] fn get_idl_name_sort_order(shorthand: &ShorthandId) -> u32 { <% # Sort by IDL name. sorted_shorthands = sorted(data.shorthands, key=lambda p: to_idl_name(p.ident)) # Annotate with sorted position sorted_shorthands = [(p, position) for position, p in enumerate(sorted_shorthands)] %> match *shorthand { % for property, position in sorted_shorthands: ShorthandId::${property.camel_case} => ${position}, % endfor } }<|fim▁end|>
}, _ => Err(()), }
<|file_name|>PlotChangeOwnerEvent.java<|end_file_name|><|fim▁begin|>/* * _____ _ _ _____ _ * | __ \| | | | / ____| | | * | |__) | | ___ | |_| (___ __ _ _ _ __ _ _ __ ___ __| | * | ___/| |/ _ \| __|\___ \ / _` | | | |/ _` | '__/ _ \/ _` | * | | | | (_) | |_ ____) | (_| | |_| | (_| | | | __/ (_| | * |_| |_|\___/ \__|_____/ \__, |\__,_|\__,_|_| \___|\__,_| * | | * |_| * PlotSquared plot management system for Minecraft * Copyright (C) 2014 - 2022 IntellectualSites * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ package com.plotsquared.core.events; import com.plotsquared.core.player.PlotPlayer; import com.plotsquared.core.plot.Plot; import com.plotsquared.core.plot.PlotId; import org.checkerframework.checker.nullness.qual.Nullable; import java.util.UUID; public class PlotChangeOwnerEvent extends PlotEvent implements CancellablePlotEvent { private final PlotPlayer<?> initiator; @Nullable private final UUID oldOwner; private final boolean hasOldOwner; @Nullable private UUID newOwner; private Result eventResult; /** * PlotChangeOwnerEvent: Called when a plot's owner is change. * * @param initiator The player attempting to set the plot's owner * @param plot The plot having its owner changed * @param oldOwner The old owner of the plot or null * @param newOwner The new owner of the plot or null * @param hasOldOwner If the plot has an old owner */ public PlotChangeOwnerEvent( PlotPlayer<?> initiator, Plot plot, @Nullable UUID oldOwner, @Nullable UUID newOwner, boolean hasOldOwner ) { super(plot); this.initiator = initiator; this.newOwner = newOwner; this.oldOwner = oldOwner; this.hasOldOwner = hasOldOwner; } /** * Get the PlotId. * * @return PlotId */ public PlotId getPlotId() { return getPlot().getId(); } /** * Get the world name. * * @return String */ public String getWorld() { return getPlot().getWorldName(); } /** * Get the change-owner initiator * * @return Player */ public PlotPlayer<?> getInitiator() { return this.initiator; } /** * Get the old owner of the plot. Null if not exists. * * @return UUID */ public @Nullable UUID getOldOwner() { return this.oldOwner; } /** * Get the new owner of the plot * * @return UUID */ public @Nullable UUID getNewOwner() { return this.newOwner; } /** * Set the new owner of the plot. Null for no owner. * * @param newOwner the new owner or null */ public void setNewOwner(@Nullable UUID newOwner) { this.newOwner = newOwner; } /** * Get if the plot had an old owner * * @return boolean */ public boolean hasOldOwner() {<|fim▁hole|> public Result getEventResult() { return eventResult; } @Override public void setEventResult(Result e) { this.eventResult = e; } }<|fim▁end|>
return this.hasOldOwner; } @Override
<|file_name|>LayouterSample.java<|end_file_name|><|fim▁begin|>/* * @(#)LayouterSample.java * * Copyright (c) 1996-2010 by the original authors of JHotDraw and all its * contributors. All rights reserved. * * You may not use, copy or modify this file, except in compliance with the * license agreement you entered into with the copyright holders. For details * see accompanying license terms. */ package org.jhotdraw.samples.mini; import org.jhotdraw.draw.tool.DelegationSelectionTool; import org.jhotdraw.draw.layouter.VerticalLayouter; import javax.swing.*; import org.jhotdraw.draw.*; /** * Example showing how to layout two editable text figures and a line figure * within a graphical composite figure. * * @author Werner Randelshofer * @version $Id: LayouterSample.java 718 2010-11-21 17:49:53Z rawcoder $ */ public class LayouterSample { public static void main(String[] args) { SwingUtilities.invokeLater(new Runnable() { @Override public void run() { // Create a graphical composite figure.<|fim▁hole|> // Add child figures to the composite figure composite.add(new TextFigure("Above the line")); composite.add(new LineFigure()); composite.add(new TextFigure("Below the line")); // Set a layouter and perform the layout composite.setLayouter(new VerticalLayouter()); composite.layout(); // Add the composite figure to a drawing Drawing drawing = new DefaultDrawing(); drawing.add(composite); // Create a frame with a drawing view and a drawing editor JFrame f = new JFrame("My Drawing"); f.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); f.setSize(400, 300); DrawingView view = new DefaultDrawingView(); view.setDrawing(drawing); f.getContentPane().add(view.getComponent()); DrawingEditor editor = new DefaultDrawingEditor(); editor.add(view); editor.setTool(new DelegationSelectionTool()); f.setVisible(true); } }); } }<|fim▁end|>
GraphicalCompositeFigure composite = new GraphicalCompositeFigure();
<|file_name|>complement-tests.ts<|end_file_name|><|fim▁begin|>import * as R from 'ramda'; () => {<|fim▁hole|> // $ExpectType (n: number) => boolean const isOdd = R.complement(isEven); isOdd(21); // => true isOdd(42); // => false function isLengthEqual(value: string, length: number): boolean { return value.length === length; } const isLengthNotEqual = R.complement(isLengthEqual); // $ExpectError isLengthNotEqual("FOO", "BAR"); isLengthNotEqual("BAZ", 4); // => true // $ExpectType (value: any) => boolean R.complement(R.isNil); };<|fim▁end|>
function isEven(n: number) { return n % 2 === 0; }
<|file_name|>rays.js<|end_file_name|><|fim▁begin|>define(function(require) { 'use strict'; var _ = require('underscore'); var PIXI = require('pixi'); var PixiView = require('common/v3/pixi/view'); var Colors = require('common/colors/colors'); var Vector2 = require('common/math/vector2'); var Constants = require('constants'); var GUIDE_FILL_COLOR = Colors.parseHex(Constants.RaysView.GUIDE_FILL_COLOR); var GUIDE_LINE_COLOR = Colors.parseHex(Constants.RaysView.GUIDE_LINE_COLOR); var Assets = require('assets'); /** * Draws all the rays coming from points on the source object. * There are three different ray modes and an off mode. */ var RaysView = PixiView.extend({ /** * Initializes the new RaysView. */ initialize: function(options) { this.mvt = options.mvt; this.simulation = this.model; this.mode = RaysView.MARGINAL_RAYS this.virtualImageVisible = false; this.secondPointVisible = false; this.virtualRayColor = Colors.parseHex(RaysView.VIRTUAL_RAY_COLOR); this.sourcePointColor = Colors.parseHex(RaysView.POINT_1_COLOR); this.targetPointColor = Colors.parseHex(RaysView.POINT_2_COLOR); // Cached objects this._sourcePoint = new Vector2(); this._targetPoint = new Vector2(); this.initGraphics(); this.updateMVT(this.mvt); // Listen for changes in the source object this.listenTo(this.simulation.sourceObject, 'change:position', this.drawPoint1Rays); this.listenTo(this.simulation.sourceObject, 'change:secondPoint', this.drawPoint2Rays); // Listen for changes in the lens this.listenTo(this.simulation.lens, 'change:position', this.drawAllRays); this.listenTo(this.simulation.lens, 'change:focalLength', this.drawAllRays); this.listenTo(this.simulation.lens, 'change:diameter', this.drawAllRays); this.listenTo(this.simulation.lens, 'change:diameter change:position', this.updateGuidePositions); }, /** * Initializes all the graphics */ initGraphics: function() { this.sourcePointRays = new PIXI.Graphics(); this.targetPointRays = new PIXI.Graphics(); this.topGuide = new PIXI.Container(); this.bottomGuide = new PIXI.Container(); this.displayObject.addChild(this.sourcePointRays); this.displayObject.addChild(this.targetPointRays); this.displayObject.addChild(this.topGuide); this.displayObject.addChild(this.bottomGuide); this.topGuide.leftGuide = new PIXI.Graphics(); this.topGuide.rightGuide = new PIXI.Graphics(); this.bottomGuide.leftGuide = new PIXI.Graphics(); this.bottomGuide.rightGuide = new PIXI.Graphics(); this.topGuide.addChild(this.topGuide.leftGuide); this.topGuide.addChild(this.topGuide.rightGuide); this.bottomGuide.addChild(this.bottomGuide.leftGuide); this.bottomGuide.addChild(this.bottomGuide.rightGuide); this.hideGuides(); }, /** * Draws all the rays according to the current mode. */ drawAllRays: function() { this.drawPoint1Rays(); this.drawPoint2Rays(); }, /** * Draws the rays coming from the source object's position * according to the current mode. */ drawPoint1Rays: function() { this._sourcePoint.set(this.mvt.modelToView(this.simulation.sourceObject.get('position'))); this._targetPoint.set(this.mvt.modelToView(this.simulation.targetImage.get('position'))); this.drawRays(this.sourcePointRays, this.sourcePointColor, this._sourcePoint, this._targetPoint); }, /** * Draws the rays coming from the source object's second * point according to the current mode. 
*/ drawPoint2Rays: function() { if (this.secondPointVisible) { this._sourcePoint.set(this.mvt.modelToView(this.simulation.sourceObject.get('secondPoint'))); this._targetPoint.set(this.mvt.modelToView(this.simulation.targetImage.get('secondPoint'))); this.drawRays(this.targetPointRays, this.targetPointColor, this._sourcePoint, this._targetPoint); } else this.targetPointRays.clear(); }, /** * Draws a specific set of rays onto the specified graphics * object with the specified color from point 1 through * the lens to point 2. */ drawRays: function(graphics, color, sourcePoint, targetPoint) { graphics.clear(); graphics.lineStyle(RaysView.LINE_WIDTH, color, RaysView.LINE_ALPHA); var Ax = sourcePoint.x; var Ay = sourcePoint.y; var Bx = this.mvt.modelToViewX(this.simulation.lens.get('position').x); var By = this.mvt.modelToViewY(this.simulation.lens.get('position').y); var Cx = targetPoint.x; var Cy = targetPoint.y; // Radius of lens minus a bit so marginal ray hits inside lens var h = Math.abs(this.mvt.modelToViewDeltaY(this.simulation.lens.get('diameter'))) / 2 - RaysView.LENS_TIP_OFFSET; // Length of the ray (enough to go off the screen) var R = 1000; // Rotate the guides var topGuideTheta = Math.atan((Ay - By + h) / (Bx - Ax)); var bottomGuideTheta = Math.atan((Ay - By - h) / (Bx - Ax)); this.topGuide.rotation = -topGuideTheta; this.bottomGuide.rotation = -bottomGuideTheta; // Used to store slope of line towards C var m, m1, m2; // TODO: make guides // Note: Skipping "blur spot" of the algorithm for now because I don't // understand what it does and don't think it's used anymore var objectLensDistance = this.getObjectLensDistance(); var virtualImage = this.simulation.targetImage.isVirtualImage(); // Draw different rays depending on the mode if (this.mode === RaysView.MARGINAL_RAYS && objectLensDistance > 0) { if (!virtualImage) { graphics.moveTo(Ax, Ay); graphics.lineTo(Bx, By); // Cannot draw line directly to C since it may be at infinity. 
m = (Cy - By) / (Cx - Bx); graphics.lineTo(Bx + R, By + (m * R)); graphics.moveTo(Ax, Ay); graphics.lineTo(Bx, By + h); m = (Cy - (By + h)) / (Cx - Bx); graphics.lineTo(Bx + R, By + h + (m * R)); graphics.moveTo(Ax, Ay); graphics.lineTo(Bx, By - h); m = (Cy - (By - h)) / (Cx - Bx); graphics.lineTo(Bx + R, By - h + (m * R)); } else { graphics.moveTo(Ax, Ay); graphics.lineTo(Bx, By); m = (By - Cy) / (Bx - Cx); graphics.lineTo(Bx + R, By + (m * R)); graphics.moveTo(Ax, Ay); graphics.lineTo(Bx, By + h); m = ((By + h) - Cy) / (Bx - Cx); graphics.lineTo(Bx + R, By + h + (m * R)); graphics.moveTo(Ax, Ay); graphics.lineTo(Bx, By - h); m = ((By - h) - Cy) / (Bx - Cx); graphics.lineTo(Bx + R, By - h + (m * R)); // Draw virtual marginal rays if (this.virtualImageVisible && Cx > -5 * R) { // Last condition needed to prevent problems that occur when image at infinity graphics.lineStyle(RaysView.LINE_WIDTH, this.virtualRayColor, RaysView.LINE_ALPHA); graphics.moveTo(Ax, Ay); graphics.lineTo(Cx, Cy); graphics.moveTo(Bx, By+ h); graphics.lineTo(Cx, Cy); graphics.moveTo(Bx, By - h); graphics.lineTo(Cx, Cy); } } } else if (this.mode === RaysView.PRINCIPAL_RAYS && objectLensDistance > 0) { var f = this.mvt.modelToViewDeltaX(this.simulation.lens.get('focalLength')); graphics.moveTo(Ax, Ay); graphics.lineTo(Bx, By); m = (By - Ay) / (Bx - Ax); graphics.lineTo(Bx + R, By + (m * R)); graphics.moveTo(Ax, Ay); graphics.lineTo(Bx, Ay); m2 = (By - Ay) / f; graphics.lineTo(Bx + R, Ay + (m2 * R)); graphics.moveTo(Ax, Ay);<|fim▁hole|> graphics.lineTo(Bx + R, By + (m1 * f)); // Draw principal virtual rays if (this.virtualImageVisible && virtualImage) { graphics.lineStyle(RaysView.LINE_WIDTH, this.virtualRayColor, RaysView.LINE_ALPHA); graphics.moveTo(Ax, Ay); graphics.lineTo(Cx, Cy); graphics.moveTo(Bx, Cy); graphics.lineTo(Cx, Cy); graphics.moveTo(Bx, Ay); graphics.lineTo(Cx, Cy); } } else if (this.mode === RaysView.MANY_RAYS) { var N = 25; // Number of rays var deltaTheta = 180 / N; // Degrees between adjacent arrays var degToRad = Math.PI / 180; var bottomTheta = Math.atan((Ay-By-h) / (Bx-Ax)) * 180 / Math.PI; var topTheta = Math.atan((Ay-By+h) / (Bx-Ax)) * 180 / Math.PI; var bottomSlope = (Ay-By-h) / (Bx-Ax); var topSlope = (Ay-By+h) / (Bx-Ax); for (var i = 5; i < (N - 5); i++) { m = Math.tan(degToRad * (90 - i * deltaTheta)); if (m > bottomSlope && m < topSlope) { graphics.moveTo(Ax, Ay); graphics.lineStyle(RaysView.LINE_WIDTH, color, RaysView.LINE_ALPHA); graphics.lineTo(Bx, Ay - m * (Bx - Ax)); m2 = (Cy - (Ay - m * (Bx - Ax))) / (Cx - Bx); graphics.lineTo(Bx + R, Ay - m * (Bx - Ax) + m2 * R); if (Cx < Ax && this.virtualImageVisible && Cx > -5 * R) { graphics.moveTo(Bx, Ay - m * (Bx - Ax)); graphics.lineStyle(RaysView.LINE_WIDTH, this.virtualRayColor, 0.6); graphics.lineTo(Cx, Cy); } } else { graphics.moveTo(Ax, Ay); graphics.lineStyle(RaysView.LINE_WIDTH, color, RaysView.LINE_ALPHA); graphics.lineTo(Ax + R, Ay - m * R); } } } }, /** * Draws the guides in their unrotated state */ drawGuides: function() { this.drawGuide(this.topGuide, RaysView.GUIDE_ANGLE); this.drawGuide(this.bottomGuide, -RaysView.GUIDE_ANGLE); }, drawGuide: function(guide, angle) { var width = this.mvt.modelToViewDeltaX(RaysView.GUIDE_WIDTH); var height = this.mvt.modelToViewDeltaX(RaysView.GUIDE_HEIGHT); guide.leftGuide.clear(); guide.leftGuide.lineStyle(RaysView.GUIDE_LINE_WIDTH, GUIDE_LINE_COLOR, RaysView.GUIDE_LINE_ALPHA); guide.leftGuide.beginFill(GUIDE_FILL_COLOR, RaysView.GUIDE_FILL_ALPHA); guide.leftGuide.drawRect(0, -height / 2, width, 
height); guide.leftGuide.endFill(); guide.leftGuide.rotation = Math.PI; guide.rightGuide.clear(); guide.rightGuide.lineStyle(RaysView.GUIDE_LINE_WIDTH, GUIDE_LINE_COLOR, RaysView.GUIDE_LINE_ALPHA); guide.rightGuide.beginFill(GUIDE_FILL_COLOR, RaysView.GUIDE_FILL_ALPHA); guide.rightGuide.drawRect(0, -height / 2, width, height); guide.rightGuide.endFill(); guide.rightGuide.beginFill(GUIDE_FILL_COLOR, RaysView.GUIDE_FILL_ALPHA); guide.rightGuide.drawCircle(0, 0, height * 0.7); guide.rightGuide.endFill(); guide.rightGuide.rotation = Math.PI - angle; }, getObjectLensDistance: function() { return this.simulation.lens.get('position').x - this.simulation.sourceObject.get('position').x; }, /** * Updates the model-view-transform and anything that * relies on it. */ updateMVT: function(mvt) { this.mvt = mvt; this.drawGuides(); this.drawAllRays(); this.updateGuidePositions(); }, /** * Makes sure the guides stay on either end of the lens. */ updateGuidePositions: function() { var lensX = this.mvt.modelToViewX(this.simulation.lens.get('position').x); var lensY = this.mvt.modelToViewY(this.simulation.lens.get('position').y); var lensDiameter = this.mvt.modelToViewDeltaX(this.simulation.lens.get('diameter')); this.topGuide.x = lensX; this.topGuide.y = lensY - lensDiameter / 2 + RaysView.LENS_TIP_OFFSET; this.bottomGuide.x = lensX; this.bottomGuide.y = lensY + lensDiameter / 2 - RaysView.LENS_TIP_OFFSET; }, /** * Sets the mode that dictates what kinds of rays we draw. */ setMode: function(mode) { this.mode = mode; this.drawAllRays(); }, /** * Shows rays for second point */ showSecondPoint: function() { this.secondPointVisible = true; this.drawAllRays(); }, hideSecondPoint: function() { this.secondPointVisible = false; this.drawAllRays(); }, showVirtualImage: function() { this.virtualImageVisible = true; this.drawAllRays(); }, hideVirtualImage: function() { this.virtualImageVisible = false; this.drawAllRays(); }, showGuides: function() { this.topGuide.visible = true; this.bottomGuide.visible = true; }, hideGuides: function() { this.topGuide.visible = false; this.bottomGuide.visible = false; } }, Constants.RaysView); return RaysView; });<|fim▁end|>
m1 = (By - Ay) / (Bx - f - Ax); graphics.lineTo(Bx, By + (m1 * f));
<|file_name|>use-keyword.rs<|end_file_name|><|fim▁begin|>// Copyright 2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Check that imports with nakes super and self don't fail during parsing // FIXME: this shouldn't fail during name resolution either mod a { mod b { use self as A;<|fim▁hole|> use super as B; //~^ ERROR unresolved import `super` [E0432] //~| no `super` in the root use super::{self as C}; //~^ ERROR unresolved import `super` [E0432] //~| no `super` in the root } } fn main() {}<|fim▁end|>
//~^ ERROR `self` imports are only allowed within a { } list
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>/* * Copyright (c) Meta Platforms, Inc. and affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ mod directives; use super::get_applied_fragment_name; use crate::util::{remove_directive, replace_directive}; use common::{Diagnostic, DiagnosticsResult, NamedItem, WithLocation}; pub use directives::{DeferDirective, StreamDirective}; use graphql_ir::{ Argument, ConstantValue, Directive, Field, FragmentDefinition, FragmentSpread, InlineFragment, LinkedField, OperationDefinition, Program, ScalarField, Selection, Transformed, Transformer, Value, }; use intern::string_key::{Intern, StringKey}; use lazy_static::lazy_static; use schema::Schema; use std::{collections::HashMap, sync::Arc}; use thiserror::Error; pub struct DeferStreamConstants {<|fim▁hole|> pub label_arg: StringKey, pub initial_count_arg: StringKey, pub use_customized_batch_arg: StringKey, } impl Default for DeferStreamConstants { fn default() -> Self { Self { defer_name: "defer".intern(), stream_name: "stream".intern(), if_arg: "if".intern(), label_arg: "label".intern(), initial_count_arg: "initial_count".intern(), use_customized_batch_arg: "use_customized_batch".intern(), } } } lazy_static! { pub static ref DEFER_STREAM_CONSTANTS: DeferStreamConstants = Default::default(); } pub fn transform_defer_stream(program: &Program) -> DiagnosticsResult<Program> { let mut transformer = DeferStreamTransform { program, current_document_name: None, labels: Default::default(), errors: Default::default(), }; let next_program = transformer.transform_program(program); if transformer.errors.is_empty() { Ok(next_program.replace_or_else(|| program.clone())) } else { Err(transformer.errors) } } struct DeferStreamTransform<'s> { program: &'s Program, current_document_name: Option<StringKey>, labels: HashMap<StringKey, Directive>, errors: Vec<Diagnostic>, } impl DeferStreamTransform<'_> { fn set_current_document_name(&mut self, document_name: StringKey) { self.current_document_name = Some(document_name) } fn record_label(&mut self, label: StringKey, directive: &Directive) { let prev_directive = self.labels.get(&label); match prev_directive { Some(prev) => { self.errors.push( Diagnostic::error( ValidationMessage::LabelNotUniqueForDeferStream { directive_name: DEFER_STREAM_CONSTANTS.defer_name, }, prev.name.location, ) .annotate("related location", directive.name.location), ); } None => { self.labels.insert(label, directive.to_owned()); } }; } fn transform_defer( &mut self, spread: &FragmentSpread, defer: &Directive, ) -> Result<Transformed<Selection>, Diagnostic> { let DeferDirective { if_arg, label_arg } = DeferDirective::from(defer); if is_literal_false_arg(if_arg) { return Ok(Transformed::Replace(Selection::FragmentSpread(Arc::new( FragmentSpread { directives: remove_directive(&spread.directives, defer.name.item), ..spread.clone() }, )))); } let label_value = get_literal_string_argument(defer, label_arg)?; let label = label_value .unwrap_or_else(|| get_applied_fragment_name(spread.fragment.item, &spread.arguments)); let transformed_label = transform_label( self.current_document_name .expect("We expect the parent name to be defined here."), DEFER_STREAM_CONSTANTS.defer_name, label, ); self.record_label(transformed_label, defer); let next_label_value = Value::Constant(ConstantValue::String(transformed_label)); let next_label_arg = Argument { name: WithLocation { item: DEFER_STREAM_CONSTANTS.label_arg, location: 
label_arg.map_or(defer.name.location, |arg| arg.name.location), }, value: WithLocation { item: next_label_value, location: label_arg.map_or(defer.name.location, |arg| arg.value.location), }, }; let mut next_arguments = Vec::with_capacity(2); next_arguments.push(next_label_arg); if let Some(if_arg) = if_arg { next_arguments.push(if_arg.clone()); } let next_defer = Directive { name: defer.name, arguments: next_arguments, data: None, }; Ok(Transformed::Replace(Selection::InlineFragment(Arc::new( InlineFragment { type_condition: None, directives: vec![next_defer], selections: vec![Selection::FragmentSpread(Arc::new(FragmentSpread { directives: remove_directive(&spread.directives, defer.name.item), ..spread.clone() }))], }, )))) } fn transform_stream( &mut self, linked_field: &LinkedField, stream: &Directive, ) -> Result<Transformed<Selection>, Diagnostic> { let schema_field = self.program.schema.field(linked_field.definition.item); if !schema_field.type_.is_list() { return Err(Diagnostic::error( ValidationMessage::StreamFieldIsNotAList { field_name: schema_field.name.item, }, stream.name.location, )); } let StreamDirective { if_arg, label_arg, initial_count_arg, use_customized_batch_arg, } = StreamDirective::from(stream); let transformed_linked_field = self.default_transform_linked_field(linked_field); let get_next_selection = |directives| match transformed_linked_field { Transformed::Replace(mut selection) => { selection.set_directives(directives); Transformed::Replace(selection) } Transformed::Keep => { Transformed::Replace(Selection::LinkedField(Arc::new(LinkedField { directives, ..linked_field.clone() }))) } Transformed::Delete => Transformed::Delete, }; if is_literal_false_arg(if_arg) { return Ok(get_next_selection(remove_directive( &linked_field.directives, stream.name.item, ))); } if initial_count_arg.is_none() { return Err(Diagnostic::error( ValidationMessage::StreamInitialCountRequired, stream.name.location, )); } let label_value = get_literal_string_argument(stream, label_arg)?; let label = label_value.unwrap_or_else(|| { get_applied_fragment_name( linked_field.alias_or_name(&self.program.schema), &linked_field.arguments, ) }); let transformed_label = transform_label( self.current_document_name .expect("We expect the parent name to be defined here."), DEFER_STREAM_CONSTANTS.stream_name, label, ); self.record_label(transformed_label, stream); let next_label_value = Value::Constant(ConstantValue::String(transformed_label)); let next_label_arg = Argument { name: WithLocation { item: DEFER_STREAM_CONSTANTS.label_arg, location: label_arg.map_or(stream.name.location, |arg| arg.name.location), }, value: WithLocation { item: next_label_value, location: label_arg.map_or(stream.name.location, |arg| arg.value.location), }, }; let mut next_arguments = Vec::with_capacity(4); next_arguments.push(next_label_arg); if let Some(if_arg) = if_arg { next_arguments.push(if_arg.clone()); } if let Some(initial_count_arg) = initial_count_arg { next_arguments.push(initial_count_arg.clone()); } if let Some(use_customized_batch_arg) = use_customized_batch_arg { next_arguments.push(use_customized_batch_arg.clone()); } let next_stream = Directive { name: stream.name, arguments: next_arguments, data: None, }; Ok(get_next_selection(replace_directive( &linked_field.directives, next_stream, ))) } } impl<'s> Transformer for DeferStreamTransform<'s> { const NAME: &'static str = "DeferStreamTransform"; const VISIT_ARGUMENTS: bool = false; const VISIT_DIRECTIVES: bool = false; fn transform_operation( &mut self, 
operation: &OperationDefinition, ) -> Transformed<OperationDefinition> { self.set_current_document_name(operation.name.item); self.default_transform_operation(operation) } fn transform_fragment( &mut self, fragment: &FragmentDefinition, ) -> Transformed<FragmentDefinition> { self.set_current_document_name(fragment.name.item); self.default_transform_fragment(fragment) } /// Validates @defer is not allowed on inline fragments. fn transform_inline_fragment( &mut self, inline_fragment: &InlineFragment, ) -> Transformed<Selection> { let defer_directive = inline_fragment .directives .named(DEFER_STREAM_CONSTANTS.defer_name); if let Some(directive) = defer_directive { // Special case for @defer generated by transform_connection if let Some(label) = directive.arguments.named(DEFER_STREAM_CONSTANTS.label_arg) { if let Some(label) = label.value.item.get_string_literal() { if label.lookup().contains("$defer$") { return self.default_transform_inline_fragment(inline_fragment); } } } self.errors.push(Diagnostic::error( ValidationMessage::InvalidDeferOnInlineFragment, directive.name.location, )); } self.default_transform_inline_fragment(inline_fragment) } /// Transform of fragment spread with @defer is delegated to `transform_defer`. fn transform_fragment_spread(&mut self, spread: &FragmentSpread) -> Transformed<Selection> { let defer_directive = spread.directives.named(DEFER_STREAM_CONSTANTS.defer_name); if let Some(defer) = defer_directive { match self.transform_defer(spread, defer) { Ok(transformed) => transformed, Err(err) => { self.errors.push(err); self.default_transform_fragment_spread(spread) } } } else { self.default_transform_fragment_spread(spread) } } /// Validates @stream is not allowed on scalar fields. fn transform_scalar_field(&mut self, scalar_field: &ScalarField) -> Transformed<Selection> { let stream_directive = &scalar_field .directives .named(DEFER_STREAM_CONSTANTS.stream_name); if let Some(directive) = stream_directive { self.errors.push(Diagnostic::error( ValidationMessage::InvalidStreamOnScalarField { field_name: scalar_field.alias_or_name(&self.program.schema), }, directive.name.location, )); } self.default_transform_scalar_field(scalar_field) } /// Transform of linked field with @stream is delegated to `transform_stream`. 
fn transform_linked_field(&mut self, linked_field: &LinkedField) -> Transformed<Selection> { let stream_directive = linked_field .directives .named(DEFER_STREAM_CONSTANTS.stream_name); if let Some(stream) = stream_directive { match self.transform_stream(linked_field, stream) { Ok(transformed) => transformed, Err(err) => { self.errors.push(err); self.default_transform_linked_field(linked_field) } } } else { self.default_transform_linked_field(linked_field) } } } fn is_literal_false_arg(arg: Option<&Argument>) -> bool { if let Some(arg) = arg { matches!( arg.value.item, Value::Constant(ConstantValue::Boolean(false)) ) } else { false } } fn transform_label( parent_name: StringKey, directive_name: StringKey, label: StringKey, ) -> StringKey { format!("{}${}${}", parent_name, directive_name, label).intern() } fn get_literal_string_argument( directive: &Directive, argument: Option<&Argument>, ) -> Result<Option<StringKey>, Diagnostic> { if let Some(arg) = argument { if let Some(val) = arg.value.item.get_string_literal() { Ok(Some(val)) } else { Err(Diagnostic::error( ValidationMessage::LiteralStringArgumentExpectedForDirective { arg_name: arg.name.item, directive_name: directive.name.item, }, directive.name.location, )) } } else { Ok(None) } } #[derive(Debug, Error)] enum ValidationMessage { #[error( "Invalid use of @{directive_name}, the provided label is not unique. Specify a unique 'label' as a literal string." )] LabelNotUniqueForDeferStream { directive_name: StringKey }, #[error("Field '{field_name}' is not of list type, therefore cannot use @stream directive.")] StreamFieldIsNotAList { field_name: StringKey }, #[error("Invalid use of @stream, the 'initial_count' argument is required.")] StreamInitialCountRequired, #[error( "Invalid use of @defer on an inline fragment. Relay only supports @defer on fragment spreads." )] InvalidDeferOnInlineFragment, #[error("Invalid use of @stream on scalar field '{field_name}'")] InvalidStreamOnScalarField { field_name: StringKey }, #[error( "Expected the '{arg_name}' value to @{directive_name} to be a string literal if provided." )] LiteralStringArgumentExpectedForDirective { arg_name: StringKey, directive_name: StringKey, }, }<|fim▁end|>
pub defer_name: StringKey, pub stream_name: StringKey, pub if_arg: StringKey,
<|file_name|>netdata.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- """ Display network speed and bandwidth usage. Configuration parameters: cache_timeout: refresh interval for this module (default 2) format: display format for this module *(default '{nic} [\?color=down LAN(Kb): {down}↓ {up}↑] [\?color=total T(Mb): {download}↓ {upload}↑ {total}↕]')* nic: network interface to use (default None) thresholds: color thresholds to use *(default {'down': [(0, 'bad'), (30, 'degraded'), (60, 'good')], 'total': [(0, 'good'), (400, 'degraded'), (700, 'bad')]})* Format placeholders: {nic} network interface {down} number of download speed {up} number of upload speed {download} number of download usage {upload} number of upload usage {total} number of total usage Color thresholds: {down} color threshold of download speed {total} color threshold of total usage @author Shahin Azad <ishahinism at Gmail> SAMPLE OUTPUT [ {'full_text': 'eth0 '}, {'full_text': 'LAN(Kb): 77.8↓ 26.9↑ ', 'color': '#00FF00'}, {'full_text': 'T(Mb): 394↓ 45↑ 438↕', 'color': '#FFFF00'}, ] """ class GetData: """ Get system status. """ def __init__(self, nic): self.nic = nic def netBytes(self): """ Get bytes directly from /proc. """ with open('/proc/net/dev') as fh: net_data = fh.read().split() interface_index = net_data.index(self.nic + ':') received_bytes = int(net_data[interface_index + 1]) transmitted_bytes = int(net_data[interface_index + 9]) return received_bytes, transmitted_bytes class Py3status: """ """ # available configuration parameters cache_timeout = 2 format = u'{nic} [\?color=down LAN(Kb): {down}↓ {up}↑] ' + \ u'[\?color=total T(Mb): {download}↓ {upload}↑ {total}↕]' nic = None thresholds = { 'down': [(0, 'bad'), (30, 'degraded'), (60, 'good')], 'total': [(0, 'good'), (400, 'degraded'), (700, 'bad')] } class Meta: def deprecate_function(config): return { 'thresholds': { 'down': [ (0, 'bad'), (config.get('low_speed', 30), 'degraded'), (config.get('med_speed', 60), 'good') ], 'total': [ (0, 'good'), (config.get('low_traffic', 400), 'degraded'), (config.get('med_traffic', 700), 'bad') ] } } deprecated = { 'function': [ {'function': deprecate_function}, ], 'remove': [ { 'param': 'low_speed', 'msg': 'obsolete, set using thresholds parameter', }, { 'param': 'med_speed', 'msg': 'obsolete, set using thresholds parameter', }, { 'param': 'low_traffic', 'msg': 'obsolete, set using thresholds parameter', }, { 'param': 'med_traffic', 'msg': 'obsolete, set using thresholds parameter', }, ], } update_config = { 'update_placeholder_format': [ { 'placeholder_formats': { 'down': ':5.1f', 'up': ':5.1f', 'download': ':3.0f', 'upload': ':3.0f', 'total': ':3.0f', }, 'format_strings': ['format'] }, ], } def post_config_hook(self): """ Get network interface. """ self.old_transmitted = 0 self.old_received = 0<|fim▁hole|> # Get default gateway directly from /proc. with open('/proc/net/route') as fh: for line in fh: fields = line.strip().split() if fields[1] == '00000000' and int(fields[3], 16) & 2: self.nic = fields[0] break if self.nic is None: self.nic = 'lo' self.py3.log('selected nic: %s' % self.nic) def netdata(self): """ Calculate network speed and network traffic. """ data = GetData(self.nic) received_bytes, transmitted_bytes = data.netBytes() # net_speed (statistic) down = (received_bytes - self.old_received) / 1024. up = (transmitted_bytes - self.old_transmitted) / 1024. self.old_received = received_bytes self.old_transmitted = transmitted_bytes # net_traffic (statistic) download = received_bytes / 1024 / 1024. 
upload = transmitted_bytes / 1024 / 1024. total = download + upload # color threshold self.py3.threshold_get_color(down, 'down') self.py3.threshold_get_color(total, 'total') netdata = self.py3.safe_format(self.format, {'down': down, 'up': up, 'download': download, 'upload': upload, 'total': total, 'nic': self.nic}) return { 'cached_until': self.py3.time_in(self.cache_timeout), 'full_text': netdata } if __name__ == "__main__": """ Run module in test mode. """ from py3status.module_test import module_test module_test(Py3status)<|fim▁end|>
if self.nic is None:
<|file_name|>sectors.rs<|end_file_name|><|fim▁begin|>use rand::{seq, ChaChaRng, SeedableRng}; use rayon::prelude::*; use std::{ collections::HashMap, sync::atomic::{AtomicBool, Ordering}, time::Instant, usize::MAX, }; use config::GameConfig; use entities::Faction; use entities::Sector; use utils::Point; /// Used for generating sectors. pub struct SectorGen {} impl SectorGen { /// Create a new sector generator. pub fn new() -> SectorGen { SectorGen {} } /// Split the systems in to a set number of clusters using K-means. pub fn generate(&self, config: &GameConfig, system_locations: Vec<Point>) -> Vec<Sector> { // Measure time for generation. let now = Instant::now(); info!("Simulating expansion for initial sectors..."); let seed: &[_] = &[config.map_seed as u32]; let mut rng: ChaChaRng = ChaChaRng::from_seed(seed); // Setup initial centroids let mut centroids = seq::sample_iter(&mut rng, system_locations.iter(), config.number_of_sectors) .unwrap() .into_iter() .cloned() .collect::<Vec<_>>(); // System to cluster_id mapping let mut cluster_map: HashMap<Point, usize> = system_locations .into_iter() .map(|point| (point, 0)) .collect(); // Run K means until convergence, i.e until no reassignments let mut has_assigned = true; while has_assigned { let wrapped_assigned = AtomicBool::new(false); // Assign to closest centroid cluster_map .par_iter_mut() .for_each(|(system_location, cluster_id)| { let mut closest_cluster = *cluster_id; let mut closest_distance = system_location.distance(&centroids[*cluster_id]); for (i, centroid) in centroids.iter().enumerate() { let distance = system_location.distance(centroid); if distance < closest_distance { wrapped_assigned.store(true, Ordering::Relaxed); closest_cluster = i; closest_distance = distance; } } *cluster_id = closest_cluster; }); has_assigned = wrapped_assigned.load(Ordering::Relaxed); // Calculate new centroids centroids //.par_iter_mut() .iter_mut() .enumerate() .for_each(|(id, centroid)| { let mut count = 0.; let mut new_centroid = Point::origin(); for (system_location, _) in cluster_map.iter().filter(|&(_, c_id)| *c_id == id) { new_centroid += *system_location; count += 1.; } new_centroid *= 1. 
/ count; *centroid = new_centroid; }); } // Setup cluster vectors let mut sector_vecs = (0..config.number_of_sectors).fold(Vec::<Vec<Point>>::new(), |mut sectors, _| { sectors.push(vec![]); sectors }); // Map systems to final cluster for (system_location, id) in cluster_map { sector_vecs[id].push(system_location); } // Create sector for each cluster let sectors = sector_vecs .into_iter() .map(|system_locations| { let sector_seed: &[_] = &[system_locations.len() as u32]; let mut faction_rng: ChaChaRng = SeedableRng::from_seed(sector_seed); Sector { system_locations, faction: Faction::random_faction(&mut faction_rng), } }) .collect::<Vec<Sector>>(); info!( "Mapped galaxy into {} sectors of {} systems, avg size: {}, <|fim▁hole|> sectors.len(), sectors .iter() .fold(0, |acc, sec| acc + sec.system_locations.len()), sectors .iter() .fold(0, |acc, sec| acc + sec.system_locations.len()) / sectors.len(), sectors .iter() .fold(0, |acc, sec| acc.max(sec.system_locations.len())), sectors .iter() .fold(MAX, |acc, sec| acc.min(sec.system_locations.len())), ((now.elapsed().as_secs() * 1_000) + u64::from(now.elapsed().subsec_millis())), sectors.iter().fold(0, |acc, sec| acc + match sec.faction { Faction::Cartel => 1, _ => 0, }), sectors.iter().fold(0, |acc, sec| acc + match sec.faction { Faction::Empire => 1, _ => 0, }), sectors.iter().fold(0, |acc, sec| acc + match sec.faction { Faction::Federation => 1, _ => 0, }), sectors.iter().fold(0, |acc, sec| acc + match sec.faction { Faction::Independent => 1, _ => 0, }) ); sectors } }<|fim▁end|>
max size {}, min size {}, taking {} ms \n Sectors include: {} Cartel, {} Empire, {} Federation, {} Independent",
<|file_name|>oskari_maplayers_migration.js<|end_file_name|><|fim▁begin|>var _ = require("lodash-node"); module.exports = function(client) { client.connect(function(err) { if(err) { return console.error('could not connect to postgres', err); } // 1. clear any previous migrations, db constraints will cascade on theme links and maplayers // var query = "DELETE FROM oskari_layergroup; DELETE FROM oskari_resource WHERE resource_mapping LIKE '%_migrated+collection';"; runSQL(client, query, copyLayerGroups, 'could not clear previous migration!'); }); // 2. Create layer groups (organisations) function copyLayerGroups(client) { var query = "INSERT INTO oskari_layergroup (id, locale) SELECT id, locale FROM portti_layerclass WHERE parent IS NULL"; runSQL(client, query, fixGroupIdSequence, 'could not copy layer groups'); } // 3. update serial column sequence value since we inserted ids manually!! function fixGroupIdSequence(client) { var fixSequenceSQL = "SELECT setval(pg_get_serial_sequence('oskari_layergroup', 'id'), (SELECT MAX(id) FROM oskari_layergroup));"; runSQL(client, fixSequenceSQL, copyNormalLayers, 'could not fix sequence for oskari_layergroup.id'); } // 4. Copy independent/normal layers (that are not sublayers) function copyNormalLayers(client) { var selectSQL = "SELECT id, -1 AS parentId, layer_type, false AS base_map, layerclassid AS groupId, wmsname, wmsurl, " + "locale, opacity, '' AS style, minscale, maxscale, legend_image, dataurl, " + "tile_matrix_set_id, tile_matrix_set_data, gfi_type, xslt, " + "created, updated " + "FROM portti_maplayer WHERE layerclassid IN (SELECT id FROM oskari_layergroup);"; var insertSQL = "INSERT INTO oskari_maplayer(" + "id, parentid, type, base_map, groupid, name, url," + "locale, opacity, style, minscale, maxscale, legend_image, metadataid," + "tile_matrix_set_id, tile_matrix_set_data, gfi_type, gfi_xslt, " + "created, updated) "; runSQL(client, insertSQL + selectSQL, copySubLayers, 'could not copy normal layers'); } // 5. copy layers that are sublayers function copySubLayers(client) { // NOTE! sublayers will have groupId=1 and parentId as layerclassid var selectSQL = "SELECT id, layerclassid AS parentId, layer_type, false AS base_map, 1 AS groupId, wmsname, wmsurl, " + "locale, opacity, style, minscale, maxscale, legend_image, dataurl, " + "tile_matrix_set_id, tile_matrix_set_data, gfi_type, xslt, " + "created, updated " + "FROM portti_maplayer WHERE layerclassid NOT IN (SELECT id FROM oskari_layergroup);"; var insertSQL = "INSERT INTO oskari_maplayer(" + "id, parentid, type, base_map, groupid, name, url," + "locale, opacity, style, minscale, maxscale, legend_image, metadataid," + "tile_matrix_set_id, tile_matrix_set_data, gfi_type, gfi_xslt, " + "created, updated) "; runSQL(client, insertSQL + selectSQL, fixIdSequence, 'could not copy sublayers'); } // 6. update serial column sequence value since we inserted ids manually!! function fixIdSequence(client) { var fixSequenceSQL = "SELECT setval(pg_get_serial_sequence('oskari_maplayer', 'id'), (SELECT MAX(id) FROM oskari_maplayer));"; runSQL(client, fixSequenceSQL, createNewBaseLayers, 'could not fix sequence for oskari_maplayer.id'); } // 7. 
establish new rows to oskari_maplayers from portti_layerclass base/group layers function createNewBaseLayers(client) { var selectSQL = "SELECT -1 AS parentId, id AS externalId, 'collection' AS type, NOT group_map AS base_map, parent AS groupId, " + "id || '_migrated' AS name, 'collection' AS url, " + "locale, 100 AS opacity, '' AS style, -1 AS minscale, -1 AS maxscale, legend_image, dataurl AS metadataId, " + "'' AS tile_matrix_set_id, '' AS tile_matrix_set_data, '' AS gfi_type, '' AS gfi_xslt " + "FROM portti_layerclass WHERE parent IS NOT NULL"; var insertSQL = "INSERT INTO oskari_maplayer(" + "parentid, externalId, type, base_map, groupid, name, url," + "locale, opacity, style, minscale, maxscale, legend_image, metadataid," + "tile_matrix_set_id, tile_matrix_set_data, gfi_type, gfi_xslt) "; runSQL(client, insertSQL + selectSQL, linkSublayers, 'could not establish new collection layers'); } // 8. link sublayers to new baselayers function linkSublayers(client) { var linkSQL = "UPDATE oskari_maplayer SET parentId = m.id FROM oskari_maplayer m " + "WHERE oskari_maplayer.parentId != -1 AND oskari_maplayer.parentId = m.externalId::integer" //runSQL(client, linkSQL, updateExternalIds, 'could not link sublayers'); runSQL(client, linkSQL, copyBaseLayerPermissions, 'could not link sublayers'); } // 8.5 setup base/group layer permissions function copyBaseLayerPermissions(client) { var selectGroupsSQL = "SELECT id, name, url, externalId FROM oskari_maplayer WHERE type='collection'"; client.query(selectGroupsSQL, function(err, groupLayers) { if(err) { return console.error("Couldn't find collection layers", err); } console.log('got collection layers', groupLayers.rows.length); var count = 0; var resources = []; function permissionsCopied(err) { /* if(err) { // if previous layer had no permissions -> an error will occur, // so skipping any errors since this _should_ work :) console.log("Permissions with resource ids:", resources); return console.error("Couldn't insert permissions for resource", err); }*/ count++; // after all sqls executed -> go to next step if(count == groupLayers.rows.length) { console.log("Inserted new resources/permissions with resource ids:", resources); updateExternalIds(client); } } _.forEach(groupLayers.rows, function(layer) { //console.log('Handling layer:', layer); // insert as new resources var insertResource = "INSERT INTO oskari_resource (resource_type, resource_mapping) " + "VALUES ('maplayer', '" + layer.url + "+" + layer.name + "') " + "RETURNING ID;"; client.query(insertResource, function(err, insertResult) { if(err) { count++; return console.error("Couldn't insert grouplayer as resource", layer, err); } var resourceId = insertResult.rows[0].id; resources.push(resourceId); // copy permissions from matching layerclass var copyPermissionsSQL = "INSERT INTO oskari_permission (oskari_resource_id, external_type, permission, external_id) " + "SELECT " + resourceId + ", p.external_type, p.permission, p.external_id FROM oskari_resource r, oskari_permission p " + "WHERE r.id = p.oskari_resource_id AND r.resource_type='layerclass' AND r.resource_mapping = 'BASE+" + layer.externalid + "'"; runSQL(client, copyPermissionsSQL, permissionsCopied, 'Could not copy permissions for layer: ' + JSON.stringify(layer)); }); }); }); } // 9. 
update externalId with base_ prefix function updateExternalIds(client) { var prefixSQL = "UPDATE oskari_maplayer SET externalId = 'base_' || externalId " + "WHERE externalId IS NOT NULL"; runSQL(client, prefixSQL, linkInspireThemesForNormalLayers, 'could not prefix external ids'); } // 10. link themes from old db table for layers that exist there (non baselayers) // TODO: check collection layers inspire themes! function linkInspireThemesForNormalLayers(client) { var query = "INSERT INTO oskari_maplayer_themes (maplayerid, themeid) SELECT id, inspire_theme_id FROM portti_maplayer"; runSQL(client, query, linkInspireThemesForCollectionLayers, 'could not link inspire themes'); } // 11. Add inspire theme links to base/grouplayers function linkInspireThemesForCollectionLayers(client) { var selectSQL = "SELECT DISTINCT m1.id AS baseId, t.themeid FROM oskari_maplayer m1, oskari_maplayer m2, " + "oskari_maplayer_themes t WHERE m1.id = m2.parentId AND t.maplayerid = m2.id;" var linkSql = "INSERT INTO oskari_maplayer_themes (maplayerid, themeid) "; runSQL(client, linkSql + selectSQL, updateStylesForWMS, 'could not link inspire themes'); } // 12. update default styles for wms layers // TODO: check if wmts needs this!! function updateStylesForWMS(client) { var updateSQL = "UPDATE oskari_maplayer SET style = substr(m.style, 1, 100) FROM portti_maplayer m " + "WHERE oskari_maplayer.id = m.id AND m.layer_type = 'wmslayer'" runSQL(client, updateSQL, allDone, 'could not update default styles for layers'); } // 13. all done function allDone(client) { console.log("Upgrade complete, you can now remove portti_maplayer and portti_layerclass tables from database"); client.end(); }<|fim▁hole|> if(err) { return console.error(errorMsg, err); } callback(client); }); } }<|fim▁end|>
function runSQL(client, sql, callback, errorMsg) { client.query(sql, function(err) {
<|file_name|>IAnyResource.java<|end_file_name|><|fim▁begin|>package org.hl7.fhir.instance.model.api; import ca.uhn.fhir.model.api.annotation.SearchParamDefinition; import ca.uhn.fhir.rest.gclient.TokenClientParam; /* * #%L * HAPI FHIR - Core Library * %% * Copyright (C) 2014 - 2021 Smile CDR, Inc. * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ /** * An IBaseResource that has a FHIR version of DSTU3 or higher */ public interface IAnyResource extends IBaseResource { /** * Search parameter constant for <b>_language</b> */ @SearchParamDefinition(name="_language", path="", description="The language of the resource", type="string" ) String SP_RES_LANGUAGE = "_language"; /** * Search parameter constant for <b>_id</b> */ @SearchParamDefinition(name="_id", path="", description="The ID of the resource", type="token" ) String SP_RES_ID = "_id"; /** * <b>Fluent Client</b> search parameter constant for <b>_id</b> * <p> * Description: <b>the _id of a resource</b><br><|fim▁hole|> * </p> */ TokenClientParam RES_ID = new TokenClientParam(IAnyResource.SP_RES_ID); String getId(); IIdType getIdElement(); IPrimitiveType<String> getLanguageElement(); Object getUserData(String name); IAnyResource setId(String theId); void setUserData(String name, Object value); }<|fim▁end|>
* Type: <b>string</b><br> * Path: <b>Resource._id</b><br>
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>#![feature(box_syntax)] #![feature(asm)] extern crate e2d2; extern crate fnv; extern crate time; extern crate rand; use e2d2::scheduler::*; pub struct DepTask { id: String, deps: Vec<usize>, } impl Executable for DepTask { fn execute(&mut self) { println!("Task -- {}", self.id); } fn dependencies(&mut self) -> Vec<usize> { self.deps.clone() } } impl DepTask { pub fn new(parent: usize, id: &str) -> DepTask { DepTask { id: String::from(id),<|fim▁hole|> fn test_func(id: &str) { println!("Base Task -- {}", id); } fn main() { let mut sched = embedded_scheduler::EmbeddedScheduler::new(); let handle0 = sched.add_task(|| test_func("task-0")).unwrap(); let other_handles = { let mut prev_handle = handle0; let mut nhandles: Vec<_> = (0..10).map(|_| 0).collect(); for i in 0..nhandles.capacity() { nhandles[i] = sched.add_task(DepTask::new(prev_handle, format!("id-{}", i).as_str())).unwrap(); prev_handle = nhandles[i]; } nhandles }; let len = other_handles.len(); sched.exec_task(other_handles[len - 1]); }<|fim▁end|>
deps: vec![parent], } } }
<|file_name|>emmet_css_from_one_line.py<|end_file_name|><|fim▁begin|>import sublime, sublime_plugin, re class EmmetCssFromOneLineCommand(sublime_plugin.TextCommand): def run(self, edit): view = self.view line_region = view.line(view.sel()[0]) line_str = view.substr(line_region) left_padding = re.findall(r'^(\s+)', line_str)[0] # find commands in line<|fim▁hole|> # Delete long string view.replace(edit, line_region, '') def runEmmet(): view.run_command("expand_abbreviation_by_tab") # Processing first element view.insert(edit, view.sel()[0].end(), left_padding + props_array[0]) runEmmet() i = 1 while i < len(props_array): view.insert(edit, view.sel()[0].end(), '\n' + left_padding + props_array[i]) runEmmet() i += 1<|fim▁end|>
props_array = re.findall(r'([a-zA-Z0-9:!;().,?/\-+#]+)', line_str)
<|file_name|>tag.py<|end_file_name|><|fim▁begin|>from functools import partial def build_tag_filter(args): """ Returns a filter which selects entries with all of the given tags only. @param list(str) args, e.g. ["+tag1", "unrelated"] @return (callable filter, list remaining_args) """ remaining_args = [] tags = set() for arg in args: if arg.startswith('+'): tags.add(arg[1:]) else: remaining_args.append(arg) filter = partial(get_entries_with_tags, tags) return filter, remaining_args def get_entries_with_tags(tags, entries): """ Returns all entries which match all of the given tags. @param set tags @return generator[Entry] """ for entry in entries:<|fim▁hole|> for wanted_tag in tags: if wanted_tag.lower() not in entry.get_tags(): skip = True break if not skip: yield entry<|fim▁end|>
skip = False
<|file_name|>models.py<|end_file_name|><|fim▁begin|>from django.conf import settings from django.db import models import jsonfield class Feed(models.Model): name = models.CharField(max_length=1024) url = models.URLField() homepage = models.URLField() etag = models.CharField(max_length=1024, blank=True) last_modified = models.DateTimeField(blank=True, null=True) subscribers = models.ManyToManyField( settings.AUTH_USER_MODEL, related_name='feeds', related_query_name='feed', ) def __str__(self): return '{} ({})'.format(self.name, self.url) class Entry(models.Model): feed = models.ForeignKey(Feed) entry_id = models.CharField(max_length=1024) title = models.CharField(max_length=1024) content = models.TextField() link = models.URLField(max_length=1024) time = models.DateTimeField() json = jsonfield.JSONField() updated = models.DateTimeField(auto_now=True) class Meta: unique_together = (('feed', 'entry_id'),) verbose_name_plural = 'entries' ordering = ['-time'] def __str__(self): return '[{}] {}'.format(self.feed.name, self.title) class UserEntryState(models.Model): user = models.ForeignKey( settings.AUTH_USER_MODEL, related_name='entry_states', related_query_name='entry_state' ) entry = models.ForeignKey( Entry, related_name='user_states', related_query_name='user_state' ) read = models.BooleanField(default=False)<|fim▁hole|> opened = models.BooleanField(default=False) starred = models.BooleanField(default=False) def __str__(self): return '{} - {}'.format(self.user.username, self.entry.title) class UserConfig(models.Model): user = models.ForeignKey(settings.AUTH_USER_MODEL) mode = models.CharField( max_length=1, choices=(('A', 'All Items'), ('U', 'Unread Only')), default='A', ) sorting = models.CharField( max_length=1, choices=(('T', 'Time'), ('I', 'Intelligence')), default='T', ) def __str__(self): return 'user={}, mode={}, sorting={}'.format( self.user.username, self.mode, self.sorting )<|fim▁end|>
expanded = models.BooleanField(default=False)
<|file_name|>fake_hp_client_exceptions.py<|end_file_name|><|fim▁begin|># (c) Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Fake HP client exceptions to use when mocking HP clients.""" class UnsupportedVersion(Exception): """Unsupported version of the client.""" pass class ClientException(Exception): """The base exception class for these fake exceptions.""" _error_code = None _error_desc = None _error_ref = None _debug1 = None _debug2 = None def __init__(self, error=None): if error: if 'code' in error: self._error_code = error['code'] if 'desc' in error: self._error_desc = error['desc'] if 'ref' in error: self._error_ref = error['ref'] <|fim▁hole|> if 'debug2' in error: self._debug2 = error['debug2'] def get_code(self): return self._error_code def get_description(self): return self._error_desc def get_ref(self): return self._error_ref def __str__(self): formatted_string = self.message if self.http_status: formatted_string += " (HTTP %s)" % self.http_status if self._error_code: formatted_string += " %s" % self._error_code if self._error_desc: formatted_string += " - %s" % self._error_desc if self._error_ref: formatted_string += " - %s" % self._error_ref if self._debug1: formatted_string += " (1: '%s')" % self._debug1 if self._debug2: formatted_string += " (2: '%s')" % self._debug2 return formatted_string class HTTPConflict(Exception): http_status = 409 message = "Conflict" def __init__(self, error=None): if error and 'message' in error: self._error_desc = error['message'] def get_description(self): return self._error_desc class HTTPNotFound(Exception): http_status = 404 message = "Not found" class HTTPForbidden(ClientException): http_status = 403 message = "Forbidden" class HTTPBadRequest(Exception): http_status = 400 message = "Bad request" class HTTPServerError(Exception): http_status = 500 message = "Error" def __init__(self, error=None): if error and 'message' in error: self._error_desc = error['message'] def get_description(self): return self._error_desc<|fim▁end|>
if 'debug1' in error: self._debug1 = error['debug1']